diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg.c
@@ -7,405 +7,6 @@
 #include <riscv_vector.h>
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -597,405 +198,6 @@
 return vloxseg2ei8(v0, v1, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i16.i64(i8* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_i8mf4 (vint8mf4_t *v0, 
vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i16.i64(i8* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , 
, , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -1187,434 +389,35 @@ return vloxseg2ei16(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf8( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg2ei32_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, vuint32m4_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf8( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg3ei32_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, vuint32m4_t bindex, size_t vl) { return vloxseg3ei32(v0, v1, v2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, 
vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) 
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf2( -// CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_i8mf2 (vint8mf2_t *v0, 
vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg2.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg2ei32_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return vloxseg2ei32(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg3.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg3ei32_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m1(
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg4.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
@@ -1764,405 +567,6 @@
   return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg2.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg2ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg2ei64(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg3.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg3ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t
*base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// 
CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], 
* [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg8.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8>* [[V7:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg8ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg2.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -2296,286 +700,20 @@
   return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg2.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg2.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+void test_vloxseg2ei8_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
   return vloxseg2ei8(v0, v1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg3.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>*
[[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// 
CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// 
CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store 
<vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16>* [[V7:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg8ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg2.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg2ei8_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return vloxseg2ei8(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m1(
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg3.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
@@ -2753,272 +891,6 @@
   return vloxseg2ei8(v0, v1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg2.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg2ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg2ei16(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg3.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg3ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL:
@test_vloxseg4ei16_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 
-// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], 
align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: 
[[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -3210,272 +1082,6 @@ return vloxseg2ei16(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vloxseg5.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: 
ret void -// -void test_vloxseg7ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf2( -// CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vloxseg7.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -3667,297 +1273,31 @@ return vloxseg2ei32(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf4( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], 
<vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg2.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+void test_vloxseg2ei64_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint64m4_t bindex, size_t vl) {
   return vloxseg2ei64(v0, v1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg3.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg3.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg4.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg4ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> }
@llvm.riscv.vloxseg5.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret 
void -// -void test_vloxseg7ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf2( -// CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vloxseg7.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void 
test_vloxseg2ei64_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg2ei64(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg3.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg3ei64_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint64m4_t bindex, size_t vl) {
+void test_vloxseg3ei64_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint64m4_t bindex, size_t vl) {
   return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
 }
 
@@ -4111,139 +1451,6 @@
   return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg2.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg2ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg2ei8(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg3.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg3ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg4.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg4ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t
*v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -4435,139 +1642,6 @@ return vloxseg2ei8(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// 
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -4759,139 +1833,6 @@
return vloxseg2ei16(v0, v1, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -5083,139 +2024,6 @@
return vloxseg2ei32(v0, v1, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -6171,412 +2979,13 @@
return vloxseg2ei64(v0, v1, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
void test_vloxseg2ei8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
@@ -6761,405 +3170,6 @@
return vloxseg2ei8(v0, v1, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -7351,430 +3361,31 @@
 return vloxseg2ei16(v0, v1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+void test_vloxseg2ei32_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
 return vloxseg2ei32(v0, v1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf4( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vloxseg7.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_u8mf2 
(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * 
[[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, vuint32m4_t bindex, size_t vl) { +void test_vloxseg3ei32_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, vuint32m4_t bindex, size_t vl) { return vloxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -7807,524 +3418,125 @@ // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei32_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * 
[[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei32_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, 
base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i8.nxv16i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv16i8.nxv16i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv16i8.nxv16i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , 
, , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: 
@test_vloxseg2ei64_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint64m2_t bindex, 
-  return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg6ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg7ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg8ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei32_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
+  return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg2ei64(v0, v1, base, bindex, vl);
+void test_vloxseg6ei32_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
+  return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+void test_vloxseg7ei32_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
+  return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg4ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+void test_vloxseg8ei32_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
+  return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i8.nxv16i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg5ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+void test_vloxseg2ei32_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
+  return vloxseg2ei32(v0, v1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv16i8.nxv16i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg6ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+void test_vloxseg3ei32_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
+  return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv16i8.nxv16i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg7ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg8ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+void test_vloxseg4ei32_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
+  return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8m1(
@@ -8460,272 +3672,6 @@
 	return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg2ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg2ei8(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg3ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , }
@llvm.riscv.vloxseg4.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, 
const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: 
store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * 
[[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -8907,280 +3853,14 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i16.nxv16i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_u16m4 (vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// 
-void test_vloxseg8ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vloxseg2ei8_v_u16m4 (vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *base, vuint8m2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m1( @@ -9374,272 +4054,6 @@ return vloxseg2ei16(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, 
bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -9831,272 +4245,6 @@ return vloxseg2ei32(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg2ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg2ei64(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg3.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg3ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg4.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg4ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg5.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg5ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg6.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>*
[[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP8]], <vscale x 1 x i16>* [[V7:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg8ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg2.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg2ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg2ei64(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg3.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg3ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg4.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg4ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg5.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>
, } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, 
v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -10275,139 +4423,6 @@ return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: 
@test_vloxseg4ei8_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -10573,163 +4588,30 @@ // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i32.nxv4i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , 
} [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i32.nxv8i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_u32m4 (vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vloxseg4ei8_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: 
@test_vloxseg7ei16_v_u32mf2( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i32.nxv8i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, 
v3, v4, v5, v6, v7, base, bindex, vl); +void test_vloxseg2ei8_v_u32m4 (vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m1( @@ -10923,139 +4805,6 @@ return vloxseg2ei16(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -11247,139 +4996,6 @@ return vloxseg2ei32(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// 
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -12335,139 +5951,6 @@
 return vloxseg2ei64(v0, v1, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -12659,139 +6142,6 @@
 return vloxseg2ei8(v0, v1, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -12983,139 +6333,6 @@
 return vloxseg2ei16(v0, v1, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -13307,139 +6524,6 @@
 return vloxseg2ei32(v0, v1, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg_mask.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg_mask.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg_mask.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg_mask.c
@@ -7,405 +7,6 @@
 #include <riscv_vector.h>
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
[[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void 
test_vloxseg6ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , 
, , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -597,405 +198,6 @@ return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], 
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -1187,434 +389,35 @@
 return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
+void test_vloxseg2ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl) {
 return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
+void test_vloxseg3ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl) {
 return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:
store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, 
mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 @@ -1764,405 +567,6 @@ return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, 
vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t 
maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: 
@test_vloxseg5ei64_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2296,286 +700,20 @@ return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg2ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] 
= extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t 
maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// 
CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store 
[[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg8ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg2ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
@@ -2753,272 +891,6 @@
   return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg2ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-//
CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , 
, , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], 
[[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, 
vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16>* [[V7:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg8ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -3210,272 +1082,6 @@
   return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg2ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, 
vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// 
CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16>* [[V7:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg8ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -3667,297 +1273,31 @@
   return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+void test_vloxseg2ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl) {
   return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg4ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16>
[[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t vl) {
+void test_vloxseg3ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t vl) {
 return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }
@@ -4111,139 +1451,6 @@
 return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -4435,139 +1642,6 @@
 return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -4759,139 +1833,6 @@
 return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -5083,139 +2024,6 @@
 return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -6171,412 +2979,13 @@
 return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t
*v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, 
vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t 
maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf2_m( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], <vscale x 4 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8>* [[V7:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg8ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vloxseg2ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
@@ -6761,405 +3170,6 @@
   return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf8_m(
-//
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , 
, , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store 
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -7351,430 +3361,31 @@
 return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+void test_vloxseg2ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
 return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
+void test_vloxseg3ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
 return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }
@@ -7807,524 +3418,125 @@
 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-//
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg6ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg7ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg4ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg8ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg5ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg2ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
+ return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg6ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg3ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
+ return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg7ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg4ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
+ return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8m1_m(
@@ -8460,272 +3672,6 @@
 return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -8907,280 +3853,14 @@
 // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m4_m(
 // CHECK-RV64-NEXT: entry:
[[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, 
vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, 
-void test_vloxseg8ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg7ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg2ei8_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
+ return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -9374,272 +4054,6 @@
 return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -9831,272 +4245,6 @@
 return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -10275,139 +4423,6 @@
 return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
[[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 
2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -10573,163 +4588,30 @@ // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_u32m4_m (vuint32m4_t *v0, 
vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void 
test_vloxseg4ei8_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], <vscale x 1 x i32> [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF0:%.*]], <vscale x 8 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x i32> [[TMP1]], <vscale x 8 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32>* [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg7ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], <vscale x 1 x i32> [[MASKEDOFF6:%.*]], <vscale x 1 x i32> [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP8]], <vscale x 1 x i32>* [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg2ei8_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
+  return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m1_m(
@@ -10923,139 +4805,6 @@
   return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// 
*v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -11247,139 +4996,6 @@ return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32mf2_m( -// CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -12335,139 +5951,6 @@ return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8(v0, 
v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t 
maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -12659,139 +6142,6 @@ return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, 
const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void 
test_vloxseg5ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// 
CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -12983,139 +6333,6 @@ return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } 
[[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } 
[[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -13307,139 +6524,6 @@ return vloxseg2ei32(v0, v1, mask, maskedoff0, 
maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0 -// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1 -// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0 -// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1 -// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2 -// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0 -// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1 -// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2 -// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3 -// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]],
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 
1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, 
vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0) diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg_mask_mf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg_mask_mf.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg_mask_mf.c @@ -0,0 +1,6924 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \ +// RUN: -target-feature +v \ +// RUN: -disable-O0-optnone -emit-llvm %s \ +// RUN: -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0 +// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1 +// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0 +// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1 +// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2 +// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]]
= extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, 
v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// 
CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t 
maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t 
maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, 
maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0 +// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1 +// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0 +// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1 +// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2 +// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0 +// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1 +// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2 +// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3 +// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT:
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], <vscale x 1 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8*
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , 
, } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , 
, , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t 
vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, 
vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t 
mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// 
+//
+void test_vloxseg3ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], <vscale x 1 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], <vscale x 2 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], <vscale x 2 x i8> [[MASKEDOFF6:%.*]], <vscale x 2 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], <vscale x 4 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], <vscale x 1 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], <vscale x 1 x i16> [[MASKEDOFF6:%.*]], <vscale x 1 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP8]], <vscale x 1 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], <vscale x 2 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], <vscale x 1 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], <vscale x 1 x i16> [[MASKEDOFF6:%.*]], <vscale x 1 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP8]], <vscale x 1 x i16>* [[V7:%.*]], align 2
CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_i16mf2_m (vint16mf2_t *v0, 
vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// 
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], <vscale x 2 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], <vscale x 1 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], <vscale x 1 x i16> [[MASKEDOFF6:%.*]], <vscale x 1 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP8]], <vscale x 1 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1,
maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], <vscale x 2 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+//
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , 
, , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: 
@test_vloxseg2ei8_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } 
[[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+//
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], 
align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , 
, , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], 
[[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t 
maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:
[[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { 
, , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], <vscale x 4 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+//
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, 
vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], 
align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t 
*base, vuint64m2_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue 
{ , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_u8mf2_m 
(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// 
CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, 
vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, 
const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, 
vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], <vscale x 2 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// 
CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, 
vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, 
size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , 
} [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, 
v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: 
@test_vloxseg2ei64_v_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], <vscale x 2 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]],
* [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// 
+void test_vloxseg2ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void 
test_vloxseg2ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// 
CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP7]], <vscale x 1 x float>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], <vscale x 1 x float> [[MASKEDOFF5:%.*]], <vscale x 1 x float> [[MASKEDOFF6:%.*]], <vscale x 1 x float> [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP7]], <vscale x 1 x float>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP8]], <vscale x 1 x float>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg_mf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg_mf.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg_mf.c
@@ -0,0 +1,6924 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +v \
+// RUN:   -disable-O0-optnone -emit-llvm %s \
+// RUN:   -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg2.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg2ei8(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg3.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg4.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg5.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint8mf8_t bindex,
size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue 
{ , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void 
test_vloxseg7ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vloxseg4.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, base, bindex, 
vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], 
align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * 
[[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * 
[[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , 
, , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, 
vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vloxseg6.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store 
[[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint32m2_t 
bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vloxseg3.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: 
store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg2ei64(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg2ei8(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg2ei8(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg2ei16(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg2ei16(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg2ei32(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg2ei32(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg2ei64(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
, } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg2ei64(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , ,
, , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } 
[[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// 
CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32mf2( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + 
return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: 
@test_vloxseg5ei32_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue 
{ , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return 
vloxseg3ei64(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: 
@test_vloxseg7ei64_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: 
[[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 
1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg2ei16(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxseg2ei16(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+ return vloxseg2ei16(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+ return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+ return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+ return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+ return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+ return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+ return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg2ei32(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxseg2ei32(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxseg2ei32(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg2ei64(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
[[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, 
vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// 
CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, 
vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// 
CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, 
vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vloxseg5.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// 
CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: 
@test_vloxseg4ei32_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf4( +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * 
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg3.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg4.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg5.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg6.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg7.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
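+// NOTE (illustrative, hand-written; not produced by the check-line generator):
+// the same overloaded spelling scales with the field count. A sketch of a
+// three-field gather splitting r/g/b uint16_t triples, using the vloxseg3ei32
+// overload tested above; the names and data layout are assumptions for
+// illustration only.
+static inline void demo_gather_rgb_u16mf2 (vuint16mf2_t *r, vuint16mf2_t *g, vuint16mf2_t *b, const uint16_t *px, vuint32m1_t byte_offsets, size_t vl) {
+ // Overload resolution selects the three-output vuint16mf2_t form.
+ vloxseg3ei32(r, g, b, px, byte_offsets, vl);
+}
+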
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg8.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg2.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg2ei64(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg3.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg4.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg5.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:
[[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return 
vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } 
[[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], 
* [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: 
@test_vloxseg3ei8_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: 
store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * 
[[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } 
[[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: 
store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } 
[[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, 
const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: 
ret void +// +void test_vloxseg5ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void 
test_vloxseg4ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , 
, , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// 
CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32mf2( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint64m1_t 
bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg.c
@@ -7,405 +7,6 @@
 #include
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] =
extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t 
*v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
, } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -597,405 +198,6 @@
 return vluxseg2ei8(v0, v1, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-//
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-//
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i16.i64(i8* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, 
vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , 
, , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1187,434 +389,35 @@
return vluxseg2ei16(v0, v1, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg2ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
+void test_vluxseg2ei32_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, vuint32m4_t bindex, size_t vl) {
return vluxseg2ei32(v0, v1, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg3ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
+void test_vluxseg3ei32_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, vuint32m4_t bindex, size_t vl) {
return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , ,
} @llvm.riscv.vluxseg4.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return 
vluxseg2ei32(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , 
, } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], 
* [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store 
[[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei32_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei32_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m1(
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
@@ -1764,405 +567,6 @@
return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]],
* [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - 
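// Each removed test above wraps one overloaded indexed segment load:
// vluxseg<N>ei<W>(v0, ..., base, bindex, vl) loads, for every element
// i < vl, an N-field segment of consecutive int8_t values starting at
// base + bindex[i], and writes field f of segment i to element i of the
// f-th destination vector. The scalar model below is only an illustrative
// sketch of that semantics for the two-field case (hypothetical helper
// name, not part of this patch; masking and tail policy are ignored;
// needs <stdint.h> and <stddef.h> when compiled outside this file).
static void vluxseg2_scalar_model(int8_t *v0, int8_t *v1,
                                  const int8_t *base,
                                  const uint16_t *bindex, size_t vl) {
  for (size_t i = 0; i < vl; ++i) {
    v0[i] = base[bindex[i]];     // field 0 of segment i
    v1[i] = base[bindex[i] + 1]; // field 1 of segment i
  }
}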
-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue 
{ , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// 
CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg7ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg8.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8>* [[V7:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg8ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg2.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -2296,286 +700,20 @@
   return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg2.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg2.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+void test_vluxseg2ei8_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
   return vluxseg2ei8(v0, v1, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg3.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg3ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg4.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg4ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg5.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg5ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg5ei8(v0, v1,
v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , 
} [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 
-// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m1( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 @@ -2753,272 +891,6 @@ return vluxseg2ei8(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void 
-// -void test_vluxseg2ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , 
, , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 
2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf2( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -3210,272 +1082,6 @@ return vluxseg2ei16(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vluxseg4.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const 
int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -3667,297 +1273,31 @@
 return vluxseg2ei32(v0, v1, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+void test_vluxseg2ei64_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint64m4_t bindex, size_t vl) {
 return vluxseg2ei64(v0, v1, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
-}
-
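// A minimal usage sketch for context (assuming <riscv_vector.h> is in scope,
// as in this file; the helper name is hypothetical): each overloaded vluxseg
// call in these tests resolves on its operand types. vluxseg2ei32() below
// picks the i16mf2 variant; it gathers vl two-field segments of int16_t from
// base plus the byte offsets in bindex (32-bit index EEW, unordered) and
// writes field 0 of every segment through v0 and field 1 through v1.
static void sketch_gather_pairs(vint16mf2_t *v0, vint16mf2_t *v1,
                                const int16_t *base, vuint32m1_t bindex,
                                size_t vl) {
  vluxseg2ei32(v0, v1, base, bindex, vl); // field 0 -> *v0, field 1 -> *v1
}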
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei64_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei64_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint64m4_t bindex, size_t vl) {
+void test_vluxseg3ei64_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint64m4_t bindex, size_t vl) {
 return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
 }
@@ -4111,139 +1451,6 @@
 return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
 }

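// A minimal sketch of the index-type rule the surviving signatures encode
// (assuming <riscv_vector.h>; the helper and the shift-by-1 scaling are
// illustrative, not from this test): index EMUL is EEW/SEW * LMUL, so 64-bit
// indices for vint16m1_t data (SEW=16, LMUL=1) need EMUL=4, i.e. vuint64m4_t,
// exactly as in test_vluxseg2ei64_v_i16m1 above. The bindex values are byte
// offsets, so element indices are first scaled by sizeof(int16_t).
static void sketch_gather_by_index(vint16m1_t *v0, vint16m1_t *v1,
                                   const int16_t *base, vuint64m4_t idx,
                                   size_t vl) {
  vuint64m4_t byte_off = vsll(idx, 1, vl); // idx * sizeof(int16_t)
  vluxseg2ei64(v0, v1, base, byte_off, vl);
}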
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -4435,139 +1642,6 @@
 return vluxseg2ei8(v0, v1, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -4759,139 +1833,6 @@
 return vluxseg2ei16(v0, v1, base, bindex, vl);
 }

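// Because a C function returns a single value, the seg2..seg8 forms write
// each segment field through an output pointer, which the IR above mirrors
// as one extractvalue/store pair per field. A sketch (hypothetical helper,
// assuming <riscv_vector.h>) splitting packed {x, y, z} int32_t triples
// selected by 16-bit byte offsets:
static void sketch_split_xyz(vint32m1_t *x, vint32m1_t *y, vint32m1_t *z,
                             const int32_t *base, vuint16mf2_t byte_off,
                             size_t vl) {
  vluxseg3ei16(x, y, z, base, byte_off, vl); // one field per output pointer
}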
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -5083,139 +2024,6 @@
 return vluxseg2ei32(v0, v1, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -6171,412 +2979,13 @@
 return vluxseg2ei64(v0, v1, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
-}
-
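// The unsigned tests lower to the same sign-agnostic IR intrinsics as the
// signed ones (@llvm.riscv.vluxseg2.nxv1i8 above encodes only element count
// and width); signedness matters solely for C-level overload resolution. A
// hypothetical sketch against the surviving u8m1 variant (assuming
// <riscv_vector.h>):
static void sketch_gather_u8(vuint8m1_t *lo, vuint8m1_t *hi,
                             const uint8_t *base, vuint8m1_t byte_off,
                             size_t vl) {
  vluxseg2ei8(lo, hi, base, byte_off, vl); // resolves to the u8m1 overload
}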
-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] =
extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } 
[[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store 
[[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m1( -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg2.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg2.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vluxseg2ei8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
@@ -6761,405 +3170,6 @@
   return vluxseg2ei8(v0, v1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg2.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg2ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg2ei16(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg3.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg3ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg4.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg4ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf8(
-//
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// 
CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, base, bindex, vl); -} - -// 
CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// 
CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue 
{ <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8>* [[V7:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg8ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg2.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -7351,430 +3361,31 @@
   return vluxseg2ei16(v0, v1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg2.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg2.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+void test_vluxseg2ei32_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
   return vluxseg2ei32(v0, v1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg3.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg3.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf8(
-// CHECK-RV64-NEXT:  entry:
-//
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vluxseg7.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void 
test_vluxseg2ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , 
, , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg6ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg7.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg7ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg8.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8>* [[V7:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg8ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg2.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg2ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return vluxseg2ei32(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg3.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg3ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg4.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg4ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg5.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg5ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg6.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg6ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg7.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg7ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg8.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8>* [[V7:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg8ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg2.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg2ei32_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return vluxseg2ei32(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg3.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg3ei32_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
+void test_vluxseg3ei32_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
   return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
 }
@@ -7807,524 +3418,125 @@
 // CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
 // CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg5ei32_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg6.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg6ei32_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg7.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP7]], <vscale x 8 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg7ei32_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg8.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:
store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i8.nxv16i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv16i8.nxv16i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv16i8.nxv16i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); -} - -// 
CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, 
vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: 
@test_vluxseg5ei64_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , 
} [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg7ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg8.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8>* [[V7:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg8ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei32_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
+  return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg2.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg6.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * 
[[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, base, bindex, vl); +void test_vluxseg6ei32_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf2( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, base, bindex, vl); +void test_vluxseg7ei32_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf2( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue 
{ , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); +void test_vluxseg8ei32_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf2( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i8.nxv16i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, 
v3, v4, base, bindex, vl); +void test_vluxseg2ei32_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, vuint32m8_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf2( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv16i8.nxv16i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vluxseg3ei32_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, vuint32m8_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf2( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vluxseg4.nxv16i8.nxv16i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vluxseg4ei32_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, vuint32m8_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8m1( @@ -8460,272 +3672,6 @@ return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint8mf8_t bindex, 
size_t vl) { - return vluxseg2ei8(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: 
[[TMP8:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP8]], <vscale x 1 x i16>* [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg2.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg3.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg4.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg5.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg6.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg7.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg8.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16>* [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg2.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -8907,280 +3853,14 @@
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16> } @llvm.riscv.vluxseg2.nxv16i16.nxv16i8.i64(i16* [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 16 x i16> [[TMP1]], <vscale x 16 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei8_v_u16m4 (vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg2.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg3.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-//
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// 
CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, 
vuint16mf2_t *v4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vluxseg2ei8_v_u16m4 (vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *base, vuint8m2_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m1( @@ -9374,272 +4054,6 @@ return vluxseg2ei16(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void 
test_vluxseg8ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -9831,272 +4245,6 @@ return vluxseg2ei32(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, 
vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], 
align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -10275,139 +4423,6 @@ return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// 
CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg3.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg4.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg5.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg6.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg7.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg8.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP8]], <vscale x 1 x i32>* [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg2.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -10573,163 +4588,30 @@
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vluxseg4.nxv4i32.nxv4i8.i64(i32* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei8_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vluxseg2.nxv8i32.nxv8i8.i64(i32* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i32> [[TMP1]], <vscale x 8 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei8_v_u32m4 (vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg2.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg3.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg4.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg5.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg6.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vluxseg4ei8_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32mf2( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i32.nxv8i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vluxseg2ei8_v_u32m4 (vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m1( @@ -10923,139 +4805,6 @@ return vluxseg2ei16(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -11247,139 +4996,6 @@ return vluxseg2ei32(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// 
-void test_vluxseg8ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -12335,139 +5951,6 @@ return vluxseg2ei64(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, 
bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -12659,139 +6142,6 @@ return vluxseg2ei8(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: 
@test_vluxseg4ei16_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32mf2( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -12983,139 +6333,6 @@ return vluxseg2ei16(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
, } @llvm.riscv.vluxseg2.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, 
base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg6.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg7.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP7]], <vscale x 1 x float>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg8.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP7]], <vscale x 1 x float>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP8]], <vscale x 1 x float>* [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg2.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -13307,139 +6524,6 @@
 return vluxseg2ei32(v0, v1, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg2.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg3.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg4.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg5.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg6.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg7.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP7]], <vscale x 1 x float>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg8.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP7]], <vscale x 1 x float>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP8]], <vscale x 1 x float>* [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg2.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg_mask.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg_mask.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg_mask.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg_mask.c
@@ -7,405 +7,6 @@
 #include <riscv_vector.h>
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void
test_vluxseg2ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], 
align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void 
test_vluxseg7ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: 
@test_vluxseg3ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t 
maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t 
maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf2_m( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -597,405 +198,6 @@ return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// 
CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_i8mf2_m (vint8mf2_t 
*v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , 
, } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1187,434 +389,35 @@ return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg2ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, 
bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg3ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , 
} [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , 
, , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl) { - 
return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 @@ -1764,405 +567,6 @@ return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, 
base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// 
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], <vscale x 2 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], <vscale x 2 x i8> [[MASKEDOFF6:%.*]], <vscale x 2 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8>* [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], <vscale x 4 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8>* [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2296,286 +700,20 @@
   return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+void test_vluxseg2ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
   return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], <vscale x 1 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], <vscale x 1 x i16> [[MASKEDOFF6:%.*]], <vscale x 1 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP8]], <vscale x 1 x i16>* [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], <vscale x 2 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16>* [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
@@ -2753,272 +891,6 @@
   return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], <vscale x 1 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], <vscale x 1 x i16> [[MASKEDOFF6:%.*]], <vscale x 1 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP8]], <vscale x 1 x i16>* [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], <vscale x 2 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16>* [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -3210,272 +1082,6 @@
   return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
[[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } 
[[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// 
CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void 
test_vluxseg2ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// 
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// 
CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -3667,297 +1273,31 @@
 return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+void test_vluxseg2ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl) {
 return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } 
[[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, 
vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// 
CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: 
store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, 
v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * 
[[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t vl) {
+void test_vluxseg3ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t vl) {
 return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }
@@ -4111,139 +1451,6 @@
 return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: 
ret void -// -void test_vluxseg5ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void 
test_vluxseg7ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -4435,139 +1642,6 @@ return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: 
store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -4759,139 +1833,6 @@ return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -5083,139 +2024,6 @@ return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } 
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:
[[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , 
, , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -6171,412 +2979,13 @@
   return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-//
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , 
, , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , 
, , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
 void test_vluxseg2ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
@@ -6761,405 +3170,6 @@
 return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -7351,430 +3361,31 @@
 return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+void test_vluxseg2ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
 return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
+void test_vluxseg3ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2,
const uint8_t *base, vuint32m4_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } @@ -7807,524 +3418,125 @@ // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] 
= extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// 
CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t 
maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, 
vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return 
vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg6ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg7ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, 
vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg8ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg2ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg3ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: 
[[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg7ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg4ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
+ return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8m1_m(
@@ -8460,272 +3672,6 @@
 return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -8907,280 +3853,14 @@
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei8_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg7ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg2ei8_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
+ return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m1_m(
@@ -9374,272 +4054,6 @@
 return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -9831,272 +4245,6 @@
 return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL:
@test_vluxseg6ei64_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, 
bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], <vscale x 2 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16>* [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -10275,139 +4423,6 @@
 return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-//
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t 
maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, 
vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -10573,163 +4588,30 @@ // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , 
, , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg4ei8_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t 
maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg2ei8_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m1_m( @@ -10923,139 +4805,6 @@ return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void 
test_vluxseg2ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , 
, } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { 
<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], <vscale x 1 x i32> [[MASKEDOFF6:%.*]], <vscale x 1 x i32> [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP8]], <vscale x 1 x i32>* [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -11247,139 +4996,6 @@
 return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], <vscale x 1 x i32> [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:
[[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -12335,139 +5951,6 @@ return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, 
bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: 
@test_vluxseg2ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -12659,139 +6142,6 @@ return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, 
vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -12983,139 +6333,6 @@ return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret 
void -// -void test_vluxseg4ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, 
v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// 
CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -13307,139 +6524,6 @@ return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_f32mf2_m 
(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * 
[[V4:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP7]], <vscale x 1 x float>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP8]], <vscale x 1 x float>* [[V7:%.*]], align 4
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg8ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg_mask_mf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg_mask_mf.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg_mask_mf.c
@@ -0,0 +1,6924 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +v \
+// RUN:   -disable-O0-optnone -emit-llvm %s \
+// RUN:   -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], <vscale x 1 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+//
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, 
maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 
1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, 
vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: 
[[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } 
[[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// 
CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, 
vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] =
extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP8]], <vscale x 1 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], 
align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void 
test_vluxseg6ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, 
vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , 
, , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } 
[[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + 
return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// 
CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_i32mf2_m (vint32mf2_t *v0, 
vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// 
CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_i32mf2_m (vint32mf2_t *v0, 
vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], <vscale x 1 x i32> [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], <vscale x 1 x i32> [[MASKEDOFF6:%.*]], <vscale x 1 x i32> [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP8]], <vscale x 1 x i32>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], <vscale x 1 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], <vscale x 2 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], <vscale x 2 x i8> [[MASKEDOFF6:%.*]], <vscale x 2 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], <vscale x 4 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], <vscale x 1 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], <vscale x 2 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], <vscale x 2 x i8> [[MASKEDOFF6:%.*]], <vscale x 2 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
+//
CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, 
maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, 
vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, 
vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return 
vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, 
const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf2_m( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, 
bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: 
store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], 
align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , 
, } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], 
[[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t 
maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: 
store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+ +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , 
, } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// 
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP8]], <vscale x 1 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]],
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: 
store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , 
, , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store 
[[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP8]], <vscale x 1 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], <vscale x 2 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], <vscale x 1 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], <vscale x 1 x i16> [[MASKEDOFF6:%.*]], <vscale x 1 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP8]], <vscale x 1 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], <vscale x 2 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], <vscale x 1 x i32> [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], <vscale x 1 x i32> [[MASKEDOFF6:%.*]], <vscale x 1 x i32> [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP8]], <vscale x 1 x i32>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], <vscale x 1 x i32> [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], <vscale x 1 x i32> [[MASKEDOFF6:%.*]], <vscale x 1 x i32> [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP8]], <vscale x 1 x i32>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// 
CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2,
vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: 
@test_vluxseg7ei64_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_u32mf2_m (vuint32mf2_t *v0, 
vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, 
vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, 
vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, 
vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t 
*v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t 
maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
, , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, 
vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, 
vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t 
*v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg_mf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg_mf.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg_mf.c
@@ -0,0 +1,6924 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +v \
+// RUN:   -disable-O0-optnone -emit-llvm %s \
+// RUN:   -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg2.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg2ei8(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg3.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg4.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg4ei8(v0, v1, v2, v3, base,
bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, base, bindex, vl); +} + +// 
CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , 
, , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_i8mf2 (vint8mf2_t *v0, 
vint8mf2_t *v1, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], 
align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , 
, , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] 
= extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * 
[[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void 
test_vluxseg6ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i32.i64(i8* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , 
, , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t 
+void test_vluxseg7ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxseg2ei32(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg2ei64(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg2ei64(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxseg2ei64(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg2ei8(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg2ei8(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg2ei16(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxseg2ei16(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
{ , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf4( 
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg2ei32(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg2ei64(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// 
CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: 
@test_vluxseg2ei8_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint8mf8_t bindex, 
size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: 
@test_vluxseg5ei16_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue 
{ , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return 
vluxseg3ei32(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: 
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg7.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg8.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP8]], <vscale x 1 x i32>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg2.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]],
1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: 
store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } 
[[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// 
CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
, , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , 
, , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint8mf2_t 
bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_u8mf8 
(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, 
vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, 
vuint8mf2_t *v2, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, 
v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// 
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg2ei32(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg3.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg4.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg5.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg6.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg7.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg8.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg2.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg2ei32(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg3.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg4.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
[[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, 
v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], 
align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vluxseg3.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t 
*v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// 
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
, , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void 
test_vluxseg4ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: 
store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t 
*v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg2ei8(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg2ei16(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxseg2ei16(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: 
@test_vluxseg4ei16_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf2( +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: 
store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint32m1_t bindex, 
size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, base, bindex, vl); +} 
+ +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , 
, , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i8.i64(i32* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , 
, } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, 
vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * 
[[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , 
, , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// 
+void test_vluxseg4ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: 
ret void +// +void test_vluxseg6ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32mf2( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint32mf2_t bindex, size_t 
vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64(v0, 
v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store 
[[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsoxseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsoxseg.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsoxseg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsoxseg.c
@@ -7134,7132 +7134,3 @@
 void test_vsoxseg2ei64_v_f64m4 (double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
 return vsoxseg2ei64(base, bindex, v0, v1, vl);
 }
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]],
[[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, 
vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], 
[[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i8m4_m (vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, 
vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, 
v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_i8m1_m (vbool8_t mask, int8_t *base, 
vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_i8m4_m (vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, 
vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: 
@test_vsoxseg4ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, 
vint8m1_t v7, size_t vl) { - return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, 
vint8mf8_t v4, size_t vl) {
- return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
- return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
- return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
- return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
- return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
- return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
- return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
- return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
- return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
- return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
- return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
- return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
- return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_i16m4_m (vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
- return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
- return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
- return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
- return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
- return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
- return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
- return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
- return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
- return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
- return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
- return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_i16m4_m (vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
- return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
- return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
- return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
- return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
- return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
- return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
- return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
- return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
- return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
- return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
- return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i16m4_m (vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
- return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
- return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
- return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
- return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
- return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
- return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
- return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
- return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
- return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
- return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
- return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]],
[[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_i32m1_m (vbool32_t mask, int32_t *base, 
vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: 
@test_vsoxseg2ei8_v_i32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, 
vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); 
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg7ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
-  return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg8ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
-  return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg2ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
-  return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg3ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
-  return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg4ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
-  return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg2ei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
-  return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg2ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
-  return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg3ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
-  return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg4ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
-  return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg5ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
-  return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg6ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
-  return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg7ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
-  return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg8ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
-  return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg2ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
-  return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg3ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
-  return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg4ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
-  return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg5ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
-  return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg6ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
-  return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg7ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
-  return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg8ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
-  return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg2ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
-  return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg3ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
-  return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg4ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
-  return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg2ei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
-  return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg2ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
-  return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg3ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
-  return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, 
vint64m2_t v3, size_t vl) { - return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: 
ret void -// -void test_vsoxseg7ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], 
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg5ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
-  return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg6ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
-  return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg7ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
-  return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg8ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
-  return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg2ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
-  return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg3ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
-  return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg4ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
-  return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg2ei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
-  return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg2ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
-  return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg3ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
-  return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg4ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
-  return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg5ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
-  return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg6ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
-  return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg7ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
-  return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg8ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
-  return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg2ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
-  return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg3ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
-  return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg4ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
-  return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg5ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
-  return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg6ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
-  return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg7ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
-  return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg8ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
-  return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg2ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
-  return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg3ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
-  return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg4ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
-  return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg5ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
-  return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg6ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
-  return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg7ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
-  return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg8ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
-  return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg2ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
-  return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg3ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
-  return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg4ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
-  return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg5ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
-  return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg6ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
-  return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg7ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
-  return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg8ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
-  return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg2ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
-  return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg3ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
-  return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg4ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
-  return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg2ei8_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
-  return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg2ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
-  return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg3ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
-  return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg4ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
-  return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg5ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
-  return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg6ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
-  return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg7ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
-  return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg8ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
-  return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg2ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
-  return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg3ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
-  return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg4ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
-  return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg5ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
-  return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg6ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
-  return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg7ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
-  return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg8ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
-  return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg2ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
-  return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg3ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
-  return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg4ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
-  return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg5ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
-  return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg6ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
-  return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg7ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
-  return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg8ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
-  return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg2ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
-  return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg3ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
-  return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg4ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
-  return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg5ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
-  return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg6ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
-  return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg7ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
-  return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg8ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
-  return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg2ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
-  return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg3ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
-  return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg4ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
-  return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg2ei16_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
-  return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg2ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
-  return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg3ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
-  return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg4ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
-  return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg5ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
-  return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg6ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
-  return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg7ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
-  return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg8ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
-  return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg2ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
-  return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg3ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
-  return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg4ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
-  return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg5ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
-  return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg6ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
-  return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg7ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
-  return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg8ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
-  return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg2ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
-  return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg3ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
-  return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg4ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
-  return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg5ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
-  return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg6ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
-  return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg7ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
-  return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg8ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
-  return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg2ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
-  return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg3ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
-  return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg4ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
-  return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg5ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
-  return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg6ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
-  return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg7ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
-  return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg8ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
-  return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg2ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
-  return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg3ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
-  return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg4ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
-  return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg2ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
-  return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg3ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
-  return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg4ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
-  return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg5ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
-  return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg6ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
-  return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg7ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
-  return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
[[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei64_v_u8mf4_m (vbool32_t mask, uint8_t 
*base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, 
vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], 
[[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_u16mf4_m (vbool64_t mask, uint16_t 
*base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t 
vl) { - return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: 
@test_vsoxseg4ei8_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16mf4_m( 
-// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
-// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_u16m1_m 
(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, 
vuint16m2_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
- return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1,
vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return 
vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m4_m( -// CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, 
vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); 
-} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_f32m4_m (vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// 
CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_f32mf2_m (vbool64_t mask, float *base, 
vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsoxseg7ei16(mask, 
base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_f32m4_m (vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_f32mf2_m (vbool64_t mask, float 
*base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_f32m1_m 
(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, 
vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_f32m4_m (vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, 
vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, 
vfloat32m1_t v2, size_t vl) { - return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return 
vsoxseg2ei64(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_f32m4_m (vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t 
v3, vfloat64m1_t v4, size_t vl) { - return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: 
@test_vsoxseg2ei8_v_f64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_f64m4_m (vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_f64m1_m (vbool64_t 
- return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_f64m4_m (vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f64m4_m (vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vsoxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_f64m4_m (vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsoxseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsoxseg_mask.c
copy from clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsoxseg.c
copy to clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsoxseg_mask.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsoxseg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsoxseg_mask.c
@@ -7,7134 +7,6 @@
 #include 
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_i8m2 (int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_i8m2 (int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_i8m2 (int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_i8m4 (int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
- return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_i8m2 (int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_i8m2 (int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_i8m2 (int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_i8m4 (int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8m1(
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
- return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i8m2 (int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_i8m2 (int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_i8m2 (int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
- return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
- return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
- return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
- return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
- return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_i16m2 (int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_i16m2 (int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_i16m2 (int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_i16m4 (int16_t *base, vuint8m2_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
- return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
- return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
- return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
- return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
- return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
- return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
- return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
- return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
- return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
- return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
- return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_i16m2 (int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_i16m2 (int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_i16m2 (int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_i16m4 (int16_t *base, vuint16m4_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
- return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
- return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
- return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
- return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
- return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
- return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
- return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
- return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
- return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
- return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
- return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i16m2 (int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_i16m2 (int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_i16m2 (int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i16m4 (int16_t *base, vuint32m8_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsoxseg2ei64(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsoxseg3ei64(base, 
bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_i16m2 (int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsoxseg2ei64(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_i16m2 (int16_t *base, vuint64m8_t bindex, vint16m2_t v0, 
vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_i16m2 (int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsoxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return 
vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsoxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t 
v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsoxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsoxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsoxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i16.i64( 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsoxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: 
@test_vsoxseg4ei16_v_i32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsoxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - 
return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsoxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsoxseg2ei32(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsoxseg2ei32(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// 
CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsoxseg2ei32(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsoxseg2ei32(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsoxseg2ei64(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsoxseg2ei64(base, bindex, v0, v1, 
vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t v0, 
vint32m2_t v1, size_t vl) { - return vsoxseg2ei64(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsoxseg2ei64(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsoxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsoxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsoxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void 
test_vsoxseg2ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsoxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i64m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i64m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i64m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i64m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vsoxseg8ei64(base,
bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsoxseg2ei64(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsoxseg2ei64(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsoxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_u8mf8 (uint8_t *base, vuint8mf8_t 
bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsoxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret 
void -// -void test_vsoxseg5ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsoxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsoxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i8.i64( [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_u8m2 (uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return vsoxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_u8m2 (uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_u8m2 (uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_u8m4 (uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { - return vsoxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsoxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], 
[[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsoxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsoxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// 
CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsoxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return 
vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_u8m2 (uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return vsoxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_u8m2 (uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_u8m2 (uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_u8m4 (uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { - return vsoxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsoxseg2ei32(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsoxseg2ei32(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsoxseg2ei32(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); -} - -// 
CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsoxseg2ei32(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, 
vuint8m1_t v2, size_t vl) { - return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_u8m2 (uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return vsoxseg2ei32(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_u8m2 (uint8_t *base, 
vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_u8m2 (uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_u16m2 (uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_u16m2 (uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_u16m2 (uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_u16m4 (uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_u16m2 (uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_u16m2 (uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_u16m2 (uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_u16m4 (uint16_t *base, vuint16m4_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u16m2 (uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_u16m2 (uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_u16m2 (uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u16m4 (uint16_t *base, vuint32m8_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_u16m2 (uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_u16m2 (uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_u16m2 (uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
-} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsoxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsoxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsoxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsoxseg6ei16(base, bindex, v0, 
v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsoxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsoxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsoxseg2ei32(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32mf2( -// CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, 
vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsoxseg2ei32(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsoxseg2ei32(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsoxseg2ei32(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsoxseg2ei64(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: 
@test_vsoxseg6ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsoxseg2ei64(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, 
vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsoxseg2ei64(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void 
test_vsoxseg2ei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsoxseg2ei64(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsoxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_u64m1 
(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsoxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsoxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsoxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsoxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsoxseg4ei16(base, bindex, v0, v1, v2, 
v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsoxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsoxseg2ei32(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u64m1( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsoxseg2ei32(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsoxseg2ei32(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsoxseg2ei64(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void 
test_vsoxseg4ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsoxseg2ei64(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsoxseg2ei64(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsoxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// 
-void test_vsoxseg7ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_f32m2 (float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_f32m2 (float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_f32m2 (float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_f32m4 (float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_f32m2 (float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_f32m2 (float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_f32m2 (float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_f32m4 (float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f32m2 (float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_f32m2 (float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_f32m2 (float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f32m4 (float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_f32m2 (float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_f32m2 (float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_f32m2 (float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_f32m4 (float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_f64m2 (double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f64m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_f64m2 (double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vsoxseg3ei8(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f64m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_f64m2 (double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_f64m4 (double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vsoxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_f64m2 (double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f64m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_f64m2 (double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f64m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_f64m2 (double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_f64m4 (double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vsoxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f64m2 (double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f64m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_f64m2 (double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f64m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_f64m2 (double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f64m4 (double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vsoxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_f64m2 (double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f64m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_f64m2 (double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vsoxseg3ei64(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f64m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_f64m2 (double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_f64m4 (double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vsoxseg2ei64(base, bindex, v0, v1, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
@@ -14262,4 +7134,3 @@
 void test_vsoxseg2ei64_v_f64m4_m (vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
 return vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
 }
-
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsuxseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsuxseg.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsuxseg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsuxseg.c
@@ -7134,7132 +7134,3 @@
 void test_vsuxseg2ei64_v_f64m4 (double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
 return vsuxseg2ei64(base, bindex, v0, v1, vl);
 }
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_i8mf2_m
(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8m1_m( 
-// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_i8m4_m 
(vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void 
test_vsuxseg7ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, 
v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], 
i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_i8m4_m (vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); -} - 
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t 
v6, size_t vl) { - return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i32.i64( 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, 
vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret 
void -// -void test_vsuxseg2ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, 
size_t vl) { - return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t 
v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t 
v3, vint16m1_t v4, size_t vl) { - return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_i16m4_m (vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, 
vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: 
@test_vsuxseg7ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], 
[[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_i16m4_m (vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4_t v0, 
vint16m4_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
-// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, 
vint16m1_t v5, size_t vl) { - return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i16m4_m (vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_i16mf4_m (vbool64_t mask, int16_t 
*base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, 
v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
- return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
- return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
- return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
- return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
- return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
- return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
- return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
- return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
- return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
- return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
- return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
- return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
- return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
- return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void
test_vsuxseg4ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, 
size_t vl) { - return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: 
@test_vsuxseg3ei8_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i8.i64( 
[[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, 
vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return 
vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: 
@test_vsuxseg3ei16_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void 
test_vsuxseg4ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, 
vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, 
vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
- return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
- return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
- return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
- return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
- return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
- return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
- return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
- return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
- return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, 
vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, 
vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsuxseg3ei8(mask, 
base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, 
vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - 
return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m4_m( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, 
vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsuxseg6ei32(mask, base, 
bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_u32mf2_m (vbool64_t mask, uint32_t 
*base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, 
v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t 
v1, vuint64m1_t v2, size_t vl) { - return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// 
CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsuxseg5ei16(mask, base, 
bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_f32m4_m (vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_f32m4_m (vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_f32m4_m (vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_f32m4_m (vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_f64m4_m (vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_f64m4_m (vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_f64m4_m (vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_f64m1_m (vbool64_t mask, double 
*base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, 
- return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_f64m4_m (vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
-}
-
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsuxseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsuxseg_mask.c
copy from clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsuxseg.c
copy to clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsuxseg_mask.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsuxseg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsuxseg_mask.c
@@ -7,7134 +7,6 @@
 #include
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
- return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i8m2 (int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_i8m2 (int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_i8m2 (int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i8m4 (int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
- return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i8m2 (int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_i8m2 (int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_i8m2 (int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i8m4 (int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
- return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i8m2 (int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_i8m2 (int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_i8m2 (int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]],
[[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsuxseg2ei64(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], 
[[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vsuxseg2ei64(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsuxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsuxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// 
CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsuxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return 
vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_i16m2 (int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsuxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_i16m2 (int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_i16m2 (int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_i16m4 (int16_t *base, vuint8m2_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return vsuxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsuxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsuxseg2ei16(base, 
bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void 
test_vsuxseg2ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsuxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_i16m2 (int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsuxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_i16m2 (int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_i16m2 (int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_i16m4 (int16_t *base, vuint16m4_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return vsuxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsuxseg2ei32(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: 
@test_vsuxseg6ei32_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsuxseg2ei32(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, 
vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsuxseg2ei32(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void 
test_vsuxseg5ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i16m2 (int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsuxseg2ei32(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_i16m2 (int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_i16m2 (int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) 
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i16m4 (int16_t *base, vuint32m8_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
- return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
- return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
- return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
- return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
- return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
- return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
- return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
- return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
- return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
- return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
- return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i16m2 (int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_i16m2 (int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_i16m2 (int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
- return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
- return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
- return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
- return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
- return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
- return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
- return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
- return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i64m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i64m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i64m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i64m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i64m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i64m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i64m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i64m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
- return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
- return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1,
size_t vl) { - return vsuxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_u8mf2 
(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsuxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) 
-// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsuxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_u8m2 (uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return vsuxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_u8m2 (uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_u8m2 (uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_u8m4 (uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { - return vsuxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsuxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsuxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// 
CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsuxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, 
vuint8mf2_t v4, size_t vl) { - return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsuxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_u8m1 (uint8_t *base, 
vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_u8m2 (uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return vsuxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_u8m2 (uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_u8m2 (uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_u8m4 
(uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { - return vsuxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsuxseg2ei32(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_u8mf8 (uint8_t *base, 
vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsuxseg2ei32(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsuxseg2ei32(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsuxseg2ei32(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u8m2 (uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return vsuxseg2ei32(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_u8m2 (uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_u8m2 (uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsuxseg2ei64(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void 
test_vsuxseg5ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsuxseg2ei64(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], 
[[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsuxseg2ei64(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
- return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
- return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
- return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
- return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
- return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
- return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
- return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_u16m2 (uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_u16m2 (uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_u16m2 (uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_u16m4 (uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_u16m2 (uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_u16m2 (uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsuxseg3ei16(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_u16m2 (uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_u16m4 (uint16_t *base, vuint16m4_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return vsuxseg2ei16(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_u16m2 (uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_u16m2 (uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_u16m2 (uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_u16m4 (uint16_t *base, vuint32m8_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return vsuxseg2ei32(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_u16m2 (uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsuxseg2ei64(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_u16m2 (uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsuxseg3ei64(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_u16m2 (uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsuxseg2ei8(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsuxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// 
CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsuxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsuxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsuxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: 
@test_vsuxseg5ei16_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsuxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, 
vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsuxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], 
i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsuxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsuxseg2ei32(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_u32mf2 (uint32_t *base, 
vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsuxseg2ei32(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], 
i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsuxseg2ei32(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsuxseg2ei32(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsuxseg2ei64(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_u32mf2 (uint32_t *base, vuint64m1_t 
bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsuxseg2ei64(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsuxseg2ei64(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32m2( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsuxseg2ei64(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsuxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_u64m1 
(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsuxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsuxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, 
vuint64m1_t v1, size_t vl) { - return vsuxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) 
-// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsuxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsuxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsuxseg2ei32(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u64m1( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsuxseg2ei32(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsuxseg2ei32(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: 
@test_vsuxseg2ei64_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsuxseg2ei64(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, 
size_t vl) { - return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsuxseg2ei64(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsuxseg2ei64(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsuxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// 
CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsuxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_f32m2 (float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vsuxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_f32m2 (float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_f32m2 (float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); 
-} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_f32m4 (float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsuxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsuxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// 
CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsuxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, 
vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_f32m2 (float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vsuxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_f32m2 (float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_f32m2 (float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_f32m4 (float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsuxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsuxseg2ei32(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32mf2( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsuxseg2ei32(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_f32m1 (float *base, 
vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f32m2 (float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vsuxseg2ei32(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_f32m2 (float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_f32m2 (float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f32m4 (float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsuxseg2ei32(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsuxseg2ei64(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, 
vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsuxseg2ei64(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], 
float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_f32m2 (float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vsuxseg2ei64(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_f32m2 (float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_f32m2 (float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_f32m4 (float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsuxseg2ei64(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], double* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsuxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m2( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_f64m2 (double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsuxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_f64m2 (double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_f64m2 (double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_f64m4 (double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsuxseg2ei8(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsuxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, 
vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_f64m2 (double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsuxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_f64m2 (double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_f64m2 (double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// 
CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_f64m4 (double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsuxseg2ei16(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsuxseg2ei32(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f64m2 (double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsuxseg2ei32(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_f64m2 (double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_f64m2 (double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f64m4 (double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsuxseg2ei32(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsuxseg2ei64(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsuxseg4ei64(base, bindex, v0, 
v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_f64m2 (double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsuxseg2ei64(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_f64m2 (double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_f64m2 
(double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
-  return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[V0:%.*]], <vscale x 4 x double> [[V1:%.*]], double* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei64_v_f64m4 (double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
-  return vsuxseg2ei64(base, bindex, v0, v1, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -14262,4 +7134,3 @@
 void test_vsuxseg2ei64_v_f64m4_m (vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
   return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
 }
-
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg.c
@@ -7,405 +7,6 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg2.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg2ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg2ei8_v_i8mf8(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg3.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg3ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg3ei8_v_i8mf8(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg4.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]],
align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_v_i8mf8(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg5.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_v_i8mf8(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg6.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_v_i8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg7.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg8.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8>* [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg2.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_v_i8mf4(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg3.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_v_i8mf4(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg4.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_v_i8mf4(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg5.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8_v_i8mf4(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg6.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8_v_i8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg7.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg8.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8>* [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg2.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_i8mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_i8mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_i8mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_i8mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
, , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_i8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// 
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -597,405 +198,6 @@ return vloxseg2ei8_v_i8m4(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_i8mf8(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_i8mf8(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return 
vloxseg4ei16_v_i8mf8(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_i8mf8(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_i8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store 
[[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_i8mf4(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const 
int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_i8mf4(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_i8mf4(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_i8mf4(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_i8mf4(v0, v1, v2, v3, 
v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_i8mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_i8mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_i8mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_i8mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_i8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -1187,470 +389,71 @@ return vloxseg2ei16_v_i8m4(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_i8mf8(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_i8mf8(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf8( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg4ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_v_i8mf8(v0, v1, v2, v3, base, bindex, vl);
+void test_vloxseg2ei32_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxseg2ei32_v_i8m1(v0, v1, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg5.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg3.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg5ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_v_i8mf8(v0, v1, v2, v3, v4, base, bindex, vl);
+void test_vloxseg3ei32_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxseg3ei32_v_i8m1(v0, v1, v2, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg6.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:
[[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_i8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vloxseg4ei32_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg4ei32_v_i8m1(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf8( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +void test_vloxseg5ei32_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, 
vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg5ei32_v_i8m1(v0, v1, v2, v3, v4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_i8mf4(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_i8mf4(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) 
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_i8mf4(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_i8mf4(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_i8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return vloxseg7ei32_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return vloxseg8ei32_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return vloxseg2ei32_v_i8mf2(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return vloxseg3ei32_v_i8mf2(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return vloxseg4ei32_v_i8mf2(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return vloxseg5ei32_v_i8mf2(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return vloxseg6ei32_v_i8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return vloxseg7ei32_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return vloxseg8ei32_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei32_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return vloxseg2ei32_v_i8m1(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei32_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return vloxseg3ei32_v_i8m1(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei32_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return vloxseg4ei32_v_i8m1(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei32_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return vloxseg5ei32_v_i8m1(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8m1(
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
@@ -1764,405 +567,6 @@
  return vloxseg4ei32_v_i8m2(v0, v1, v2, v3, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg2ei64_v_i8mf8(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg3ei64_v_i8mf8(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg4ei64_v_i8mf8(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg5ei64_v_i8mf8(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg6ei64_v_i8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg7ei64_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg8ei64_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg2ei64_v_i8mf4(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg3ei64_v_i8mf4(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg4ei64_v_i8mf4(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg5ei64_v_i8mf4(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg6ei64_v_i8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg7ei64_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg8ei64_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg2ei64_v_i8mf2(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg3ei64_v_i8mf2(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg4ei64_v_i8mf2(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg5ei64_v_i8mf2(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg6ei64_v_i8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg7ei64_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg8ei64_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -2296,301 +700,35 @@
  return vloxseg8ei64_v_i8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg2ei8_v_i16mf4(v0, v1, base, bindex, vl);
+void test_vloxseg2ei8_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg2ei8_v_i16m1(v0, v1, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg3ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg3ei8_v_i16mf4(v0, v1, v2, base, bindex, vl);
+void test_vloxseg3ei8_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg3ei8_v_i16m1(v0, v1, v2, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg4ei8_v_i16mf4(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg5ei8_v_i16mf4(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg6ei8_v_i16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg7ei8_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg8ei8_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg2ei8_v_i16mf2(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg3ei8_v_i16mf2(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg4ei8_v_i16mf2(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg5ei8_v_i16mf2(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg6ei8_v_i16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg7ei8_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg8ei8_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return vloxseg2ei8_v_i16m1(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei8_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return vloxseg3ei8_v_i16m1(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m1(
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
@@ -2753,272 +891,6 @@
  return vloxseg2ei8_v_i16m4(v0, v1, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg2ei16_v_i16mf4(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_v_i16mf4(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_v_i16mf4(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_v_i16mf4(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_v_i16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_v_i16mf2(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_v_i16mf2(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_v_i16mf2(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16_v_i16mf2(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16_v_i16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -3210,272 +1082,6 @@
 return vloxseg2ei16_v_i16m4(v0, v1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_v_i16mf4(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_v_i16mf4(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_v_i16mf4(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_v_i16mf4(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_v_i16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_v_i16mf2(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_v_i16mf2(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_v_i16mf2(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32_v_i16mf2(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32_v_i16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -3667,318 +1273,52 @@
 return vloxseg2ei32_v_i16m4(v0, v1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_v_i16mf4(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_v_i16mf4(v0, v1, v2, base, bindex, vl);
+void test_vloxseg2ei64_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxseg2ei64_v_i16m1(v0, v1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg4ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_v_i16mf4(v0, v1, v2, v3, base, bindex, vl);
+void test_vloxseg3ei64_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxseg3ei64_v_i16m1(v0, v1, v2, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg5ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_v_i16mf4(v0, v1, v2, v3, v4, base, bindex, vl);
+void test_vloxseg4ei64_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxseg4ei64_v_i16m1(v0, v1, v2, v3, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_v_i16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_v_i16mf2(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_v_i16mf2(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_v_i16mf2(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64_v_i16mf2(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64_v_i16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei64_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_v_i16m1(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei64_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64_v_i16m1(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei64_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64_v_i16m1(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16m1(
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
@@ -4111,139 +1451,6 @@
 return vloxseg4ei64_v_i16m2(v0, v1, v2, v3, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_v_i32mf2(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_v_i32mf2(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_v_i32mf2(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_v_i32mf2(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_v_i32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -4435,139 +1642,6 @@ return vloxseg2ei8_v_i32m4(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_i32mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_i32mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_i32mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_i32mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_i32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_i32mf2(v0, v1, 
v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -4759,139 +1833,6 @@ return vloxseg2ei16_v_i32m4(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_i32mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_i32mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_i32mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , 
, , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_i32mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_i32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -5083,139 +2024,6 @@ return vloxseg2ei32_v_i32m4(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_i32mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_i32mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_i32mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_i32mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_i32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -6171,448 +2979,49 @@ return vloxseg2ei64_v_i64m4(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf8( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 
1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg2.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_v_u8mf8(v0, v1, base, bindex, vl);
+void test_vloxseg2ei8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
+ return vloxseg2ei8_v_u8m1(v0, v1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg3.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg3.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_v_u8mf8(v0, v1, v2, base, bindex, vl);
+void test_vloxseg3ei8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
+ return vloxseg3ei8_v_u8m1(v0, v1, v2, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg4.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg4.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg4ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_v_u8mf8(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg5.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_v_u8mf8(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg6.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_v_u8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg7.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> }
[[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_u8mf4(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vloxseg3.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_u8mf4(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_u8mf4(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_u8mf4(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_u8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei8_v_u8mf4 (vuint8mf4_t *v0, 
vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_u8mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_u8mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_u8mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_u8mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_u8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 
1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8>* [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg8ei8_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg2.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8_v_u8m1(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg3.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg3ei8_v_u8m1(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg4.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg4ei8_v_u8m1(v0, v1, v2, v3, base, bindex, vl);
+void test_vloxseg4ei8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
+ return vloxseg4ei8_v_u8m1(v0, v1, v2, v3, base, bindex, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8m1(
@@ -6761,405 +3170,6 @@ return vloxseg2ei8_v_u8m4(v0, v1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg2.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_v_u8mf8(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg3.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_v_u8mf8(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg4.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_v_u8mf8(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg5.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg5ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg5ei16_v_u8mf8(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg6.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg6ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg6ei16_v_u8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg7.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg7ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg7ei16_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-//
CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_u8mf4(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_u8mf4(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_u8mf4(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_u8mf4(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_u8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , 
, , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_u8mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_u8mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_u8mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_u8mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_u8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void 
test_vloxseg8ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -7237,517 +3247,118 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg6ei16_v_u8m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg7ei16_v_u8m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], 
align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg8ei16_v_u8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i8.nxv16i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_u8m2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv16i8.nxv16i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg3ei16_v_u8m2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv16i8.nxv16i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg4ei16_v_u8m2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv32i8.nxv32i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_u8m4 (vuint8m4_t *v0, vuint8m4_t *v1, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return vloxseg2ei16_v_u8m4(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_u8mf8(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_u8mf8(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_u8mf8(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_u8mf8(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_u8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i32.i64(i8* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_u8mf4(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_u8mf4(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } 
[[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_u8mf4(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_u8mf4(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_u8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +void test_vloxseg6ei16_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg6ei16_v_u8m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf4( +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vloxseg7ei16_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg7ei16_v_u8m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf2( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_u8mf2(v0, v1, base, bindex, vl); +void test_vloxseg8ei16_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg8ei16_v_u8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_u8mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf2( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i8.nxv16i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_u8mf2(v0, v1, v2, v3, base, bindex, vl); +void test_vloxseg2ei16_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, vuint16m4_t bindex, size_t vl) { + return vloxseg2ei16_v_u8m2(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf2( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv16i8.nxv16i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, 
vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_u8mf2(v0, v1, v2, v3, v4, base, bindex, vl); +void test_vloxseg3ei16_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, vuint16m4_t bindex, size_t vl) { + return vloxseg3ei16_v_u8m2(v0, v1, v2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf2( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv16i8.nxv16i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_u8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vloxseg4ei16_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, vuint16m4_t bindex, size_t vl) { + return vloxseg4ei16_v_u8m2(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf2( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * 
[[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv32i8.nxv32i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vloxseg2ei16_v_u8m4 (vuint8m4_t *v0, vuint8m4_t *v1, const uint8_t *base, vuint16m8_t bindex, size_t vl) { + return vloxseg2ei16_v_u8m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m1( @@ -7928,405 +3539,6 @@ return vloxseg4ei32_v_u8m2(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, 
const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_u8mf8(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_u8mf8(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_u8mf8(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_u8mf8(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_u8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store 
[[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_u8mf4(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_u8mf4(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_u8mf4(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_u8mf4(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_u8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_u8mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_u8mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_u8mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_u8mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_u8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], 
align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg7ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg7ei64_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg8.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8>* [[V7:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg8ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg8ei64_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg2.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -8450,280 +3662,14 @@
 // CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
 // CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP7]], <vscale x 8 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP8]], <vscale x 8 x i8>* [[V7:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg8ei64_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5,
vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg8ei64_v_u8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_u16mf4(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_u16mf4(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_u16mf4(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: 
store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_u16mf4(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_u16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_u16mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_u16mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * 
[[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_u16mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_u16mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_u16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], 
align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP7]], <vscale x 8 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP8]], <vscale x 8 x i8>* [[V7:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg7ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg7ei8_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg8.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16>* [[V7:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg8ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg8ei8_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+void test_vloxseg8ei64_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
+  return vloxseg8ei64_v_u8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m1(
@@ -8917,272 +3863,6 @@
   return vloxseg2ei8_v_u16m4(v0, v1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg2.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:
store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_u16mf4(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_u16mf4(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_u16mf4(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_u16mf4(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_u16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_u16mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_u16mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_u16mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_u16mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_u16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t 
*v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg7ei16_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg8.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16>* [[V7:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg8ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg8ei16_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg2.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -9374,272 +4054,6 @@
   return vloxseg2ei16_v_u16m4(v0, v1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg2.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg2ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vloxseg2ei32_v_u16mf4(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg3.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void
test_vloxseg3ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_u16mf4(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_u16mf4(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_u16mf4(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t 
*v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_u16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i32.i64(i16* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_u16mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_u16mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_u16mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_u16mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// 
CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_u16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , 
, } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -9831,272 +4245,6 @@ return vloxseg2ei32_v_u16m4(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_u16mf4(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_u16mf4(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], 
align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_u16mf4(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_u16mf4(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_u16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], 
align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_u16mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_u16mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_u16mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_u16mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { 
, , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_u16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_u16mf2(v0, v1, v2, v3, 
v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -10251,161 +4399,28 @@ // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei64_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_u16m2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i16.nxv8i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64_v_u16m2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_u32mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_u32mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_u32mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_u32mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_u32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vloxseg3ei64_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg3ei64_v_u16m2(v0, v1, v2, base, bindex, vl); } -// CHECK-RV64-LABEL: 
@test_vloxseg7ei8_v_u32mf2( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i16.nxv8i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret 
void -// -void test_vloxseg8ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vloxseg4ei64_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg4ei64_v_u16m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m1( @@ -10599,139 +4614,6 @@ return vloxseg2ei8_v_u32m4(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_u32mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_u32mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_u32mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_u32mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_u32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, 
vuint32mf2_t *v6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -10923,139 +4805,6 @@ return vloxseg2ei16_v_u32m4(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_u32mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, 
vuint32mf2_t *v2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_u32mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_u32mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_u32mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, 
vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_u32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -11247,139 +4996,6 @@ 
return vloxseg2ei32_v_u32m4(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_u32mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_u32mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_u32mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void 
test_vloxseg5ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_u32mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_u32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -12335,139 +5951,6 @@ return vloxseg2ei64_v_u64m4(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_f32mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_f32mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_f32mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -12659,139 +6142,6 @@ return vloxseg2ei8_v_f32m4(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t 
*v1, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_f32mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_f32mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_f32mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -12983,139 +6333,6 @@ return vloxseg2ei16_v_f32m4(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_f32mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_f32mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_f32mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32mf2( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -13307,139 +6524,6 @@ return vloxseg2ei32_v_f32m4(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_f32mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_f32mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_f32mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint64m1_t bindex, size_t vl) { - return 
vloxseg8ei64_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vloxseg2.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg_mask.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg_mask.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg_mask.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg_mask.c
@@ -7,405 +7,6 @@
 #include <riscv_vector.h>
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg2ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg2ei8_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg3ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg3ei8_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg4ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const
int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, 
vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: 
@test_vloxseg5ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) 
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_i8mf4_m(v0, v1, v2, v3, v4, 
v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: 
store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -597,405 +198,6 @@ return vloxseg2ei8_v_i8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
, , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], 
* [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1187,470 +389,71 
@@ return vloxseg2ei16_v_i8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t 
maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg2ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg2ei32_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg3ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg3ei32_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg4ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg4ei32_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: 
[[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg5ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg5ei32_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * 
[[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t 
mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, 
vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_i8mf2_m(v0, v1, v2, v3, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t 
maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg8ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg3ei32_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg4ei32_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg5ei32_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
@@ -1764,405 +567,6 @@
 return vloxseg4ei32_v_i8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void
test_vloxseg3ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint64m2_t 
bindex, size_t vl) { - return vloxseg3ei64_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf2_m( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg8ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2296,272 +700,6 @@
   return vloxseg8ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg2ei8_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg3ei8_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg4ei8_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg5ei8_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg6ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg7ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg8ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg2ei8_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg3ei8_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg4ei8_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg5ei8_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg6ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg7ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg8ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2746,277 +884,11 @@
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint8m2_t bindex, size_t vl) {
-  return vloxseg2ei8_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg2ei16_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg3ei16_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg4ei16_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg5ei16_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg6ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg7ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg8ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg2ei16_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg3ei16_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg4ei16_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg5ei16_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg6ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg7ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg7ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg8ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg2ei8_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint8m2_t bindex, size_t vl) {
+  return vloxseg2ei8_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m1_m(
@@ -3210,272 +1082,6 @@
   return vloxseg2ei16_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vloxseg2ei32_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vloxseg3ei32_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vloxseg4ei32_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vloxseg5ei32_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vloxseg6ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vloxseg7ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vloxseg8ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return vloxseg2ei32_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return vloxseg3ei32_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return vloxseg4ei32_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return vloxseg5ei32_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return vloxseg6ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return vloxseg7ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return vloxseg8ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -3667,272 +1273,6 @@
   return vloxseg2ei32_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue
{ , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } 
[[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , 
} [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: 
ret void -// -void test_vloxseg2ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -4111,139 +1451,6 @@ return vloxseg4ei64_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 
1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -4435,139 +1642,6 @@ return 
vloxseg2ei8_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , 
, , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // 
CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -4738,158 +1812,25 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, 
vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg4ei16_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t 
bindex, size_t vl) {
+  return vloxseg4ei16_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], <vscale x 1 x i32> [[MASKEDOFF6:%.*]], <vscale x 1 x i32> [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP8]], <vscale x 1 x i32>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF0:%.*]], <vscale x 8 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP1]], <vscale x 8 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32>* [[V1:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg8ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vloxseg8ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+//
+void test_vloxseg2ei16_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl) {
+  return vloxseg2ei16_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m1_m(
@@ -5083,139 +2024,6 @@
   return vloxseg2ei32_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64
0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -6171,405 +2979,6 @@ return vloxseg2ei64_v_i64m4_m(v0, v1, mask, maskedoff0, 
maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , 
} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue 
{ , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { 
, , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 
4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -6739,425 +3148,26 @@ // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg4ei8_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_u8m4_m (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return vloxseg2ei8_v_u8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: 
store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store 
[[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// 
CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg4ei8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl) { + return vloxseg4ei8_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg2ei8_v_u8m4_m (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint8m4_t bindex, size_t vl) { + return vloxseg2ei8_v_u8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m1_m( @@ -7351,405 +3361,6 @@ return vloxseg2ei16_v_u8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: 
@test_vloxseg3ei32_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, 
vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t 
maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, 
size_t vl) { - return vloxseg5ei32_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, 
vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return 
vloxseg7ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -7866,465 +3477,66 @@ // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// 
CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg8ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg3ei32_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t 
*v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg4ei32_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg4ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg8ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxseg8ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg5ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg5ei64_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg2ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
+ return vloxseg2ei32_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg6ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg6ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg3ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
+ return vloxseg3ei32_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg7ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg7ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg8ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg4ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
+ return vloxseg4ei32_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8m1_m(
@@ -8460,272 +3672,6 @@
 return vloxseg8ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, 
vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , 
, , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t 
maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// 
CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -8917,272 +3863,6 @@ return vloxseg2ei8_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , 
, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg6ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], <vscale x 1 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg7ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], <vscale x 1 x i16> [[MASKEDOFF6:%.*]], <vscale x 1 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP8]], <vscale x 1 x i16>* [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg8ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg2ei16_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg3ei16_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg4ei16_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg5ei16_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg6ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg7ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], <vscale x 2 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16>* [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg8ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -9363,281 +4043,15 @@
 // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16> } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF0:%.*]], <vscale x 16 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 16 x i16> [[TMP1]], <vscale x 16 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei16_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
-  return vloxseg2ei16_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vloxseg2ei32_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vloxseg3ei32_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vloxseg4ei32_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vloxseg5ei32_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vloxseg6ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], <vscale x 1 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vloxseg7ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], <vscale x 1 x i16> [[MASKEDOFF6:%.*]], <vscale x 1 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP8]], <vscale x 1 x i16>* [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vloxseg8ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return vloxseg2ei32_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return vloxseg3ei32_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return vloxseg4ei32_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return vloxseg5ei32_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return vloxseg6ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return vloxseg7ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], <vscale x 2 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16> } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF0:%.*]], <vscale x 16 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 16 x i16> [[TMP1]], <vscale x 16 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return vloxseg8ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+//
+void test_vloxseg2ei16_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
+  return vloxseg2ei16_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
 
 // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m1_m(
@@ -9831,272 +4245,6 @@
 return vloxseg2ei32_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg2ei64_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg3ei64_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg4ei64_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg5ei64_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg6ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], <vscale x 1 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg7ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], <vscale x 1 x i16> [[MASKEDOFF6:%.*]], <vscale x 1 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP8]], <vscale x 1 x i16>* [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg8ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg2ei64_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg3ei64_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg4ei64_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg5ei64_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg6ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg7ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], <vscale x 2 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16>* [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg8ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -10275,139 +4423,6 @@
 return vloxseg4ei64_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg2ei8_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg3ei8_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg4ei8_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg5ei8_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg6ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], <vscale x 1 x i32> [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue {
, , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return 
vloxseg8ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -10599,139 +4614,6 @@ return vloxseg2ei8_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, 
vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: 
@test_vloxseg7ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_u32mf2_m (vuint32mf2_t 
*v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -10923,139 +4805,6 @@ return vloxseg2ei16_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, 
vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store 
[[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -11247,139 +4996,6 @@ return vloxseg2ei32_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , 
, } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -12318,154 +5934,21 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei64_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: 
store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , 
, , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP7]], <vscale x 1 x float>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg7ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg7ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg4ei64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg4ei64_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], <vscale x 1 x float> [[MASKEDOFF5:%.*]], <vscale x 1 x float> [[MASKEDOFF6:%.*]], <vscale x 1 x float> [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP7]], <vscale x 1 x float>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP8]], <vscale x 1 x float>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP1]], <vscale x 4 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64>* [[V1:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg8ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg8ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+//
+void test_vloxseg2ei64_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg2ei64_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m1_m(
@@ -12659,139 +6142,6 @@
   return vloxseg2ei8_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg2ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg2ei16_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg3ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg3ei16_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] =
extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, 
vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , 
, , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -12983,139 +6333,6 @@ return vloxseg2ei16_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * 
[[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -13307,139 +6524,6 @@ return vloxseg2ei32_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, 
const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , 
} [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -14395,272 +7479,6 @@ return vloxseg2ei64_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_f16mf4(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void 
test_vloxseg3ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg3ei8_v_f16mf4(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg4.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg4ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg4ei8_v_f16mf4(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg5.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg5ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg5ei8_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg6.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg6ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t
*v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vloxseg2.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_f16mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_f16mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_f16mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return 
vloxseg5ei8_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -14839,283 +7657,17 @@ return vloxseg4ei8_v_f16m2(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16f16.nxv16i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8_v_f16m4(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_f16mf4(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return 
vloxseg3ei16_v_f16mf4(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_f16mf4(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_f16mf4(v0, 
v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_f16mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_f16mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_f16mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf2( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// 
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16f16.nxv16i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+//
+void test_vloxseg2ei8_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint8m2_t bindex, size_t vl) {
+ return vloxseg2ei8_v_f16m4(v0, v1, base, bindex, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m1(
@@ -15309,272 +7861,6 @@
 return vloxseg2ei16_v_f16m4(v0, v1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_v_f16mf4(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_v_f16mf4(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_v_f16mf4(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_v_f16mf2(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_v_f16mf2(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_v_f16mf2(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -15766,272 +8052,6 @@
 return vloxseg2ei32_v_f16m4(v0, v1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_v_f16mf4(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_v_f16mf4(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_v_f16mf4(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_v_f16mf2(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_v_f16mf2(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_v_f16mf2(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -16133,347 +8153,81 @@
 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg7ei64_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg8ei64_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64_v_f16m2(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg3ei64_v_f16m2(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg4ei64_v_f16m2(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg7ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+ return vloxseg7ei64_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg4ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg8ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+ return vloxseg8ei64_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg5ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg2ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
+ return vloxseg2ei64_v_f16m2(v0, v1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg6ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg3ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vloxseg3ei64_v_f16m2(v0, v1, v2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg4ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vloxseg4ei64_v_f16m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m1_m( @@ -16667,272 +8421,6 @@ return vloxseg2ei8_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
, , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf2_m( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_f16mf2_m(v0, v1, v2, v3, v4, 
v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -17124,272 +8612,6 @@ return vloxseg2ei16_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_f16mf4_m(v0, v1, 
mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t 
*v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void 
test_vloxseg7ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, 
vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void 
-// -void test_vloxseg5ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store 
[[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -17581,272 +8803,6 @@ return vloxseg2ei32_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: 
store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg_mask.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg_mask_mf.c
copy from clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg_mask.c
copy to clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg_mask_mf.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg_mask.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg_mask_mf.c
@@ -406,197 +406,6 @@
   return vloxseg8ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg2ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return vloxseg2ei8_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg3ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return vloxseg3ei8_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg4ei8_v_i8m1_m (vint8m1_t *v0,
vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg5ei8_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg6ei8_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg7ei8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, 
vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg8ei8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8_v_i8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg3ei8_v_i8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg4ei8_v_i8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// 
CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg2ei8_v_i8m4_m (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint8m4_t bindex, size_t vl) {
-  return vloxseg2ei8_v_i8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -996,197 +805,6 @@
   return vloxseg8ei16_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg2ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return vloxseg2ei16_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg3ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return vloxseg3ei16_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:
[[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg5ei16_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 
-// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg6ei16_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg7ei16_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg8ei16_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_i8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg3ei16_v_i8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } 
[[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg4ei16_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return vloxseg4ei16_v_i8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg2ei16_v_i8m4_m (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint16m8_t bindex, size_t vl) {
-  return vloxseg2ei16_v_i8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -1586,184 +1204,6 @@
   return vloxseg8ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg2ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return vloxseg2ei32_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg3ei32_v_i8m1_m (vint8m1_t *v0,
vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg5ei32_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue 
{ , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg6ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg7ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg8ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_i8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg3ei32_v_i8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: 
@test_vloxseg4ei32_v_i8m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], <vscale x 16 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg4ei32_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return vloxseg4ei32_v_i8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2163,139 +1603,6 @@
   return vloxseg8ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg2ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return vloxseg2ei64_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg3ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return vloxseg3ei64_v_i8m1_m(v0, v1, v2, mask,
maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg5ei64_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], 
align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg6ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg7ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg8ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2562,197 +1869,6 @@ return vloxseg8ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, 
vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] 
= extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei8_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8_v_i16m2_m(v0, v1, v2, mask, maskedoff0, 
maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -3019,204 +2135,13 @@ return vloxseg8ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - 
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, 
vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t 
maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -3476,197 +2401,6 @@ return vloxseg8ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint32m2_t bindex, size_t vl) { 
- return vloxseg6ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_i16m2_m (vint16m2_t *v0, vint16m2_t 
*v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -3933,184 +2667,6 @@ return vloxseg8ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, 
base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], 
align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei64_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei64_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -4244,197 +2800,6 @@ return vloxseg8ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: 
@test_vloxseg4ei8_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { 
, , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , 
, , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei8_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -4568,243 +2933,52 @@ return vloxseg8ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg2ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg3ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, 
vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg4ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], 
* [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// 
CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 @@ -4892,197 +3066,6 @@ return vloxseg8ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
i32* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], <vscale x 2 x i32> [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32_v_i32m1_m(v0, v1, v2, v3, 
v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -5216,1009 +3199,852 @@ return vloxseg8ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg2ei64_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg2ei8_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg3ei64_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg3ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg3ei8_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg4ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg4ei64_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg4ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg4ei8_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg5ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg5ei64_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg5ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg5ei8_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg6ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg6ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg6ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg6ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg7ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg7ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg7ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg7ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]],
i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg8ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg8ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg8ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg8ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei64_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg2ei64_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg2ei8_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei64_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg3ei64_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg3ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg3ei8_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei64_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg4ei64_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei64_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint64m8_t bindex, size_t vl) {
-  return vloxseg2ei64_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg4ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg4ei8_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg2ei8_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg5ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg5ei8_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg3ei8_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg6ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg6ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg4ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg4ei8_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg7ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg7ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]],
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg5ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg5ei8_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg8ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg8ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg6ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg6ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg2ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg2ei8_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg7ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg7ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg3ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg3ei8_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg8ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg8ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg4ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg4ei8_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei8_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg2ei8_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg5ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg5ei8_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei8_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg3ei8_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg6ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg6ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] =
extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg4ei8_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg4ei8_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg7ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg7ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m4_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei8_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
-  return vloxseg2ei8_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg8ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg8ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg2ei16_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg2ei16_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg3ei16_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg3ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg3ei16_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg4ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg4ei16_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg4ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg4ei16_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg5ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg5ei16_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg5ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg5ei16_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg6ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg6ei16_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg6ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg6ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1,
maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg7ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], 
align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg8ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg2ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, 
vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg3ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m4_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg4ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg5ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg6ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg6ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg7ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg7ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] 
= extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg8ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg8ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg2ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); 
+void test_vloxseg3ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg3ei16_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg4ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, 
vuint16m1_t bindex, size_t vl) { + return vloxseg4ei16_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg5ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg5ei16_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg6ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg6ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg7ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg7ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m4_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg8ei16_v_u8mf2_m 
(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg8ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 
1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei64_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// 
CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
@@ -6231,13 +4057,13 @@
 // CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg5ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg5ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg5ei32_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
@@ -6252,13 +4078,13 @@
 // CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg6ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg6ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t
maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6275,13 +4101,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6300,26 
+4126,26 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6328,13 +4154,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6345,13 +4171,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6364,13 +4190,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, 
vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg5ei32_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6385,13 +4211,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg6ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6408,13 +4234,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg7ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6433,26 +4259,26 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, 
vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg8ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6461,13 +4287,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg3ei32_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6478,13 +4304,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg4ei32_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6497,13 +4323,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg5ei32_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6518,13 +4344,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg6ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6541,13 +4367,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t 
maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg7ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6566,249 +4392,58 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg8ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } 
[[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg5ei8_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg6ei8_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg7ei8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg8ei8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg2ei8_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg3ei8_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg4ei8_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_u8m4_m (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
- return vloxseg2ei8_v_u8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]],
[[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -6821,13 +4456,13 @@
// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg5ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg5ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg5ei64_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -6842,13 +4477,13 @@
// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg6ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg6ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg6ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -6865,13 +4500,13 @@
// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg7ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1,
vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg7ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg7ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -6890,26 +4525,26 @@
// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg8ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg8ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg8ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg2ei64_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -6918,13 +4553,13 @@
// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg3ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg3ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg3ei64_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], *
[[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -6935,13 +4570,13 @@
// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg4ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg4ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg4ei64_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -6954,13 +4589,13 @@
// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg5ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg5ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg5ei64_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -6975,13 +4610,13 @@
// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg6ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg6ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg6ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -6998,13 +4633,13 @@
// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg7ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg7ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg7ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6,
base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -7023,26 +4658,26 @@
// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg8ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg8ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg8ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxseg2ei64_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -7051,13 +4686,13 @@
// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg3ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg3ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxseg3ei64_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -7068,13 +4703,13 @@
// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg4ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg4ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxseg4ei64_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf2_m(
+//
CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -7087,13 +4722,13 @@
// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg5ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg5ei16_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg5ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxseg5ei64_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -7108,13 +4743,13 @@
// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg6ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg6ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg6ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxseg6ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -7131,13 +4766,13 @@
// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg7ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg7ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg7ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxseg7ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -7156,7243
+4791,2136 @@
// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg8ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg8ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg8ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxseg8ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg2ei8_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg3ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg3ei16_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg3ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg3ei8_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg4ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg4ei16_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void
test_vloxseg4ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg4ei8_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg5ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg5ei16_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg5ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg5ei8_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg6ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg6ei16_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg6ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg6ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]],
i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg7ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg7ei16_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg7ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg7ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg8ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg8ei16_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg8ei8_v_u16mf4_m (vuint16mf4_t *v0,
vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg3ei16_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg4ei16_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m4_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// 
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei16_v_u8m4_m (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
- return vloxseg2ei16_v_u8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg5ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxseg5ei8_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg6ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxseg6ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg3ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg7ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxseg7ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg4ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg8ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxseg8ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg5ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg2ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg2ei16_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg6ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg3ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg3ei16_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg5ei32_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg6ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg7ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg8ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg3ei32_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg4ei32_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg5ei32_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg6ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg7ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg8ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg2ei32_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg3ei32_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg4ei32_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]],
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// 
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) 
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg5ei64_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg6ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg7ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg8ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg3ei64_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg4ei64_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg5ei64_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg6ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg7ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg8ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-
return vloxseg6ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , 
, , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// 
-void test_vloxseg4ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void 
-// -void test_vloxseg8ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei8_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t 
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m2_m(
-//
-void test_vloxseg4ei8_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg4ei8_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m4_m(
-//
-void test_vloxseg2ei8_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg2ei8_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf4_m(
-//
-void test_vloxseg2ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf4_m(
-//
-void test_vloxseg3ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf4_m(
-//
-void test_vloxseg4ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf4_m(
-//
-void test_vloxseg5ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf4_m(
-//
-void test_vloxseg6ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf4_m(
-//
-void test_vloxseg7ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf4_m(
-//
-void test_vloxseg8ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf2_m(
-//
-void test_vloxseg2ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf2_m(
-//
-void test_vloxseg3ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf2_m(
-//
-void test_vloxseg4ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf2_m(
-//
-void test_vloxseg5ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf2_m(
-//
-void test_vloxseg6ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf2_m(
-//
-void test_vloxseg7ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf2_m(
-//
-void test_vloxseg8ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m1_m(
-//
-void test_vloxseg2ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m1_m(
-//
-void test_vloxseg3ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m1_m(
-//
-void test_vloxseg4ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16m1_m(
-//
-void test_vloxseg5ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg5ei16_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16m1_m(
-//
-void test_vloxseg6ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg6ei16_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16m1_m(
-//
-void test_vloxseg7ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg7ei16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16m1_m(
-//
-void test_vloxseg8ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg8ei16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m2_m(
-//
-void test_vloxseg2ei16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m2_m(
-//
-void test_vloxseg3ei16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg3ei16_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m2_m(
-//
-void test_vloxseg4ei16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg4ei16_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m4_m(
-//
-void test_vloxseg2ei16_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg2ei16_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf4_m(
-//
-void test_vloxseg2ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf4_m(
-//
-void test_vloxseg3ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf4_m(
-//
-void test_vloxseg4ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf4_m(
-//
-void test_vloxseg5ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf4_m(
-//
-void test_vloxseg6ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf4_m(
-//
-void test_vloxseg7ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf4_m(
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf2_m( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m1_m( -// CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 
3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void 
test_vloxseg3ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void 
test_vloxseg3ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void 
test_vloxseg3ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } 
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg6ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg6ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], <vscale x 4 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP7]], <vscale x 4 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg7ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg7ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], <vscale x 4 x i16> [[MASKEDOFF6:%.*]], <vscale x 4 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP7]], <vscale x 4 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP8]], <vscale x 4 x i16>* [[V7:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg8ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg8ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg2ei64_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
-  return vloxseg2ei64_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg3ei64_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask,
vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei8_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t 
mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], 
align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], 
i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t 
maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint16m1_t bindex, 
size_t vl) { - return vloxseg3ei16_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei32_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei64_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei64_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei64_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei64_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei8_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei8_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]],
align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// 
-void test_vloxseg2ei16_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u64m1_m( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint32mf2_t bindex, 
size_t vl) { - return vloxseg8ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void 
test_vloxseg4ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV64-NEXT: 
ret void -// -void test_vloxseg8ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t 
mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei64_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void 
test_vloxseg6ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg5ei8_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei8_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei8_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg4ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg4ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg4ei16_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg5ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg5ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg5ei16_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg6ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg6ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg6ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg7ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg7ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg7ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg8ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg8ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg8ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxseg2ei16_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg3ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg3ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxseg3ei16_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg4ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg4ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxseg4ei16_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg5ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg5ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxseg5ei16_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg6ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg6ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxseg6ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg7ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg7ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxseg7ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg8ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg8ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxseg8ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei16_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg2ei32_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg3ei16_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg3ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg3ei32_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg4ei16_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg4ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg4ei32_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m4_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei16_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg5ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg5ei32_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg6ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg6ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg3ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg7ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg7ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+//
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg8ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg2ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg3ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } 
-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg7ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg4ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxseg4ei32_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg8ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg5ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxseg5ei32_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg6ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxseg6ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg7ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxseg7ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg4ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg8ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxseg8ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg5ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg2ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg2ei64_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg6ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg3ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg3ei64_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg7ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg4ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg4ei64_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg8ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg5ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg5ei64_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg6ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg6ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg7ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg7ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg4ei32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg8ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg8ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m4_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei32_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg2ei64_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg3ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg3ei64_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg4ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg4ei64_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg4ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg5ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg5ei64_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg5ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg6ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg6ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]],
[[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg7ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg7ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store 
[[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg8ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg8ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg2ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, 
const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg3ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg4ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg5ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg6ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg7ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg8ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// 
CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP7]], <vscale x 2 x float>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP8]], <vscale x 2 x float>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg8ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg8ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg2ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg2ei16_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i64.i64(<vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei64_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg2ei64_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg3ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg3ei16_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m2_m(
+// CHECK-RV64-LABEL: 
@test_vloxseg4ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg4ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg5ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m4_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg6ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t 
mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg7ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg8ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg2ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg3ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, 
vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg3ei32_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg6ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg4ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg4ei32_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg7ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg7ei8_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg5ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg5ei32_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg8ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg8ei8_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg6ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg6ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei8_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg2ei8_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg7ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg7ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei8_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg3ei8_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg8ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg8ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg4ei8_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg4ei8_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg2ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg2ei64_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m4_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei8_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg3ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg3ei64_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg4ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg4ei64_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg5ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg5ei64_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg4ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg6ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg6ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg5ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg5ei16_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg7ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg7ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg6ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg6ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg8ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg8ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg7ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg7ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg2ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg2ei8_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg8ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg3ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg3ei8_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei16_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg2ei16_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg4ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg4ei8_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei16_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg5ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg5ei8_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg4ei16_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg6ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg6ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m4_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei16_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg7ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg7ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg8ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg8ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg2ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg2ei16_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg4ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg3ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg3ei16_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg5ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg4ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg4ei16_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg6ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg5ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg5ei16_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg7ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg6ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg6ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg8ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg7ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg7ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei32_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_v_f64m2_m(v0, v1,
- return vloxseg2ei32_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg8ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg8ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg3ei32_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg2ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg2ei32_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg4ei32_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg3ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg3ei32_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m4_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei32_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg4ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg4ei32_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg5ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg5ei32_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg3ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg6ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg6ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg4ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg7ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg7ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg5ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg8ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg8ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg6ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg2ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg2ei64_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg7ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg3ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg3ei64_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg8ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg4ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg4ei64_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg5ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg5ei64_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg3ei64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg6ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg6ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg4ei64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg7ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg7ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m4_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei64_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg8ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg8ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf4(
@@ -14592,264 +7120,73 @@
return vloxseg5ei8_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg6ei8_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg7ei8_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vloxseg8ei8_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_v_f16m1(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg3ei8_v_f16m1(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg4ei8_v_f16m1(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg5ei8_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg6ei8_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg7ei8_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg8ei8_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f16.nxv8i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8_v_f16m2(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m2(
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8f16.nxv8i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
[[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg3ei8_v_f16m2(v0, v1, v2, base, bindex, vl);
+void test_vloxseg6ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxseg6ei8_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m2(
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8f16.nxv8i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg4ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg4ei8_v_f16m2(v0, v1, v2, v3, base, bindex, vl);
+void test_vloxseg7ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxseg7ei8_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m4(
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16f16.nxv16i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei8_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg2ei8_v_f16m4(v0, v1, base, bindex, vl);
+void test_vloxseg8ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxseg8ei8_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -15118,197 +7455,6 @@
 return vloxseg8ei16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_v_f16m1(v0, v1, base, bindex, vl);
-}
-
-//
CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_f16m1(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_f16m1(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_f16m2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16_v_f16m2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_f16m2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16f16.nxv16i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_f16m4 (vfloat16m4_t *v0, 
vfloat16m4_t *v1, const _Float16 *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_f16m4(v0, v1, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -15575,197 +7721,6 @@ return vloxseg8ei32_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_f16m1(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_f16m1(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_f16m1(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } 
[[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16m1( -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f16.nxv8i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_f16m2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8f16.nxv8i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_f16m2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8f16.nxv8i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } 
[[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_f16m2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16f16.nxv16i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_f16m4(v0, v1, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -15976,238 +7931,60 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, 
const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_f16m1(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_f16m1(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f16.nxv4i64.i64(half* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_f16m1(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg7ei64_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg8ei64_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64_v_f16m2(v0, v1, base, bindex, vl);
+void test_vloxseg6ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg6ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m2(
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg3ei64_v_f16m2(v0, v1, v2, base, bindex, vl);
+void test_vloxseg7ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg7ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m2(
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg4ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg4ei64_v_f16m2(v0, v1, v2, v3, base, bindex, vl);
+void test_vloxseg8ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg8ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf4_m(
@@ -16476,197 +8253,6 @@
 return vloxseg8ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], *
[[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg6ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg7ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg8ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg2ei8_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei8_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg3ei8_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei8_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vloxseg4ei8_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei8_v_f16m4_m (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint8m2_t bindex, size_t vl) {
- return vloxseg2ei8_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -16933,197 +8519,6 @@
 return vloxseg8ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg2ei16_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg3ei16_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg5ei16_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg6ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg7ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg8ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg3ei16_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg4ei16_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei16_v_f16m4_m (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg2ei16_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -17390,197 +8785,6 @@
 return vloxseg8ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg5ei32_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg6ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg7ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg8ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei32_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei32_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg3ei32_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei32_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg4ei32_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei32_v_f16m4_m (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg2ei32_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -17847,180 +9051,3 @@
 return vloxseg8ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg5ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg5ei64_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg6ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg6ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg7ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg7ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg8ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg8ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg2ei64_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg3ei64_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg3ei64_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vloxseg4ei64_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg4ei64_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg_mf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg_mf.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg_mf.c
@@ -0,0 +1,6924 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN: -target-feature +zfh -target-feature +v \
+// RUN: -disable-O0-optnone -emit-llvm %s \
+// RUN: -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg2ei8_v_i8mf8(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg3ei8_v_i8mf8(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg4ei8_v_i8mf8(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg5ei8_v_i8mf8(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg6ei8_v_i8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg7ei8_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg8ei8_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxseg2ei8_v_i8mf4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxseg3ei8_v_i8mf4(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxseg4ei8_v_i8mf4(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxseg5ei8_v_i8mf4(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxseg6ei8_v_i8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxseg7ei8_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret
void +// +void test_vloxseg8ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg8ei8_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8_v_i8mf2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg3ei8_v_i8mf2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg4ei8_v_i8mf2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg5ei8_v_i8mf2(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg6ei8_v_i8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg7ei8_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg8ei8_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16_v_i8mf8(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16_v_i8mf8(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * 
[[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16_v_i8mf8(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16_v_i8mf8(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16_v_i8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16_v_i8mf4(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16_v_i8mf4(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16_v_i8mf4(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16_v_i8mf4(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// 
+void test_vloxseg6ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg6ei16_v_i8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg7ei16_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg8ei16_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vloxseg2.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16_v_i8mf2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg3ei16_v_i8mf2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg4ei16_v_i8mf2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg5ei16_v_i8mf2(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// 
CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg6ei16_v_i8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg7ei16_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store 
[[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg8ei16_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32_v_i8mf8(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32_v_i8mf8(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32_v_i8mf8(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , 
, , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32_v_i8mf8(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32_v_i8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret 
void +// +void test_vloxseg7ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32_v_i8mf4(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32_v_i8mf4(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32_v_i8mf4(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg5ei32_v_i8mf4(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg6ei32_v_i8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vloxseg7.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg7ei32_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg8ei32_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_i8mf2 
(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxseg2ei32_v_i8mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxseg3ei32_v_i8mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxseg4ei32_v_i8mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxseg5ei32_v_i8mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxseg6ei32_v_i8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxseg7ei32_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxseg8ei32_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg2ei64_v_i8mf8(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg3ei64_v_i8mf8(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg4ei64_v_i8mf8(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg5ei64_v_i8mf8(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg6ei64_v_i8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg7ei64_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg8ei64_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg2ei64_v_i8mf4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg3ei64_v_i8mf4(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:
[[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg4ei64_v_i8mf4(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg5ei64_v_i8mf4(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg6ei64_v_i8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg7ei64_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg8ei64_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxseg2ei64_v_i8mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxseg3ei64_v_i8mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxseg4ei64_v_i8mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxseg5ei64_v_i8mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxseg6ei64_v_i8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxseg7ei64_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxseg8ei64_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg2ei8_v_i16mf4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg3ei8_v_i16mf4(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg4ei8_v_i16mf4(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei8_v_i16mf4
(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg5ei8_v_i16mf4(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg6ei8_v_i16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg7ei8_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg8ei8_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxseg2ei8_v_i16mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxseg3ei8_v_i16mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxseg4ei8_v_i16mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxseg5ei8_v_i16mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxseg6ei8_v_i16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+//
vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg6ei16_v_i16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg7ei16_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg8ei16_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxseg2ei16_v_i16mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxseg3ei16_v_i16mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxseg4ei16_v_i16mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxseg5ei16_v_i16mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxseg6ei16_v_i16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxseg7ei16_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxseg8ei16_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg2ei32_v_i16mf4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg3ei32_v_i16mf4(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg4ei32_v_i16mf4(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg5ei32_v_i16mf4(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg6ei32_v_i16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg7ei32_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg8ei32_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxseg2ei32_v_i16mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const
int16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32_v_i16mf2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32_v_i16mf2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg5ei32_v_i16mf2(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return 
vloxseg6ei32_v_i16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg7ei32_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg8ei32_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64_v_i16mf4(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64_v_i16mf4(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64_v_i16mf4(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64_v_i16mf4(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vloxseg6.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64_v_i16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64_v_i16mf2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64_v_i16mf2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64_v_i16mf2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i64.i64(i16* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg5ei64_v_i16mf2(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg6ei64_v_i16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void 
test_vloxseg7ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg7ei64_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg8ei64_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8_v_i32mf2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8_v_i32mf2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32mf2( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8_v_i32mf2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8_v_i32mf2(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8_v_i32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
{ , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret 
void +// +void test_vloxseg2ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16_v_i32mf2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16_v_i32mf2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16_v_i32mf2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16_v_i32mf2(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16_v_i32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 
4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32_v_i32mf2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32_v_i32mf2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32_v_i32mf2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32_v_i32mf2(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32_v_i32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return 
vloxseg7ei32_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64_v_i32mf2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64_v_i32mf2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64_v_i32mf2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64_v_i32mf2(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64_v_i32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// 
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg7ei64_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg8.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP8]], <vscale x 1 x i32>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg8ei64_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg2.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg2ei8_v_u8mf8(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg3.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg3ei8_v_u8mf8(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg4.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg4ei8_v_u8mf8(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg5.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg5ei8_v_u8mf8(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg6.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg6ei8_v_u8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg7.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg7ei8_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg8.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg8ei8_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg2.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg2ei8_v_u8mf4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg3.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg3ei8_v_u8mf4(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg4.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg4ei8_v_u8mf4(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg5.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg5ei8_v_u8mf4(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg6.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg6ei8_v_u8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg7.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg7ei8_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg8.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg8ei8_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg2.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg2ei8_v_u8mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg3.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg3ei8_v_u8mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg4.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg4ei8_v_u8mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg5.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg5ei8_v_u8mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg6.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg6ei8_v_u8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg7.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg7ei8_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg8.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg8ei8_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg2.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg2ei16_v_u8mf8(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg3.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg3ei16_v_u8mf8(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg4.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg4ei16_v_u8mf8(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg5.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg5ei16_v_u8mf8(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg6.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg6ei16_v_u8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg7.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg7ei16_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg8.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg8ei16_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg2.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg2ei16_v_u8mf4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg3.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg3ei16_v_u8mf4(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg4.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg4ei16_v_u8mf4(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg5.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg5ei16_v_u8mf4(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg6.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg6ei16_v_u8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg7.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg7ei16_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg8.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg8ei16_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg2.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg2ei16_v_u8mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg3.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg3ei16_v_u8mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg4.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg4ei16_v_u8mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg5.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg5ei16_v_u8mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg6.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg6ei16_v_u8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg7.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg7ei16_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg8.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg8ei16_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg2.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg2ei32_v_u8mf8(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg3.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg3ei32_v_u8mf8(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg4.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg4ei32_v_u8mf8(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg5.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg5ei32_v_u8mf8(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg6.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg6ei32_v_u8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg7.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg7ei32_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg8.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg8ei32_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg2.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32_v_u8mf4(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32_v_u8mf4(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32_v_u8mf4(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg5ei32_v_u8mf4(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vloxseg6.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg6ei32_v_u8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg7ei32_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxseg8ei32_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxseg2ei32_v_u8mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxseg3ei32_v_u8mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxseg4ei32_v_u8mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxseg5ei32_v_u8mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxseg6ei32_v_u8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxseg7ei32_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxseg8ei32_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg2ei64_v_u8mf8(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg3ei64_v_u8mf8(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg4ei64_v_u8mf8(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg5ei64_v_u8mf8(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg6ei64_v_u8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg7ei64_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg8ei64_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg2ei64_v_u8mf4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg3ei64_v_u8mf4(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg4ei64_v_u8mf4(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg5ei64_v_u8mf4(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg6ei64_v_u8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg7ei64_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg8ei64_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxseg2ei64_v_u8mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxseg3ei64_v_u8mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxseg4ei64_v_u8mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxseg5ei64_v_u8mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxseg6ei64_v_u8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxseg7ei64_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxseg8ei64_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg2ei8_v_u16mf4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg3ei8_v_u16mf4(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg4ei8_v_u16mf4(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg5ei8_v_u16mf4(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg6ei8_v_u16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg7ei8_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg8ei8_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxseg2ei8_v_u16mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxseg3ei8_v_u16mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxseg4ei8_v_u16mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxseg5ei8_v_u16mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxseg6ei8_v_u16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxseg7ei8_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxseg8ei8_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg2ei16_v_u16mf4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg3ei16_v_u16mf4(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg4ei16_v_u16mf4(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg5ei16_v_u16mf4(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg6ei16_v_u16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg7ei16_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16_v_u16mf2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16_v_u16mf2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } 
[[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16_v_u16mf2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16_v_u16mf2(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg6ei16_v_u16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg7ei16_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg8ei16_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32_v_u16mf4(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32_v_u16mf4(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32_v_u16mf4(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32_v_u16mf4(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * 
[[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32_v_u16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, 
vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32_v_u16mf2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32_v_u16mf2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32_v_u16mf2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg5ei32_v_u16mf2(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg6ei32_v_u16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg7ei32_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , 
, , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg8ei32_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64_v_u16mf4(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64_v_u16mf4(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64_v_u16mf4(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64_v_u16mf4(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64_v_u16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64_v_u16mf2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64_v_u16mf2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64_v_u16mf2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg5ei64_v_u16mf2(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , 
, , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg6ei64_v_u16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg7ei64_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, 
vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg8ei64_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8_v_u32mf2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8_v_u32mf2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8_v_u32mf2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// 
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8_v_u32mf2(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8_v_u32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16_v_u32mf2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16_v_u32mf2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16_v_u32mf2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16_v_u32mf2(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16_v_u32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32_v_u32mf2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32_v_u32mf2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32_v_u32mf2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32_v_u32mf2(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], 
align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32_v_u32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint32mf2_t bindex, 
size_t vl) { + return vloxseg8ei32_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64_v_u32mf2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64_v_u32mf2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64_v_u32mf2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: 
ret void +// +void test_vloxseg5ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64_v_u32mf2(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64_v_u32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8_v_f32mf2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8_v_f32mf2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret 
void +// +void test_vloxseg4ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8_v_f32mf2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16_v_f32mf2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16_v_f32mf2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16_v_f32mf2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16_v_f32mf2(v0, v1, 
v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32_v_f32mf2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32_v_f32mf2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32_v_f32mf2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void 
test_vloxseg5ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64_v_f32mf2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64_v_f32mf2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// 
CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64_v_f32mf2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], 
align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP7]], <vscale x 1 x float>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg7ei64_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg8.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP7]], <vscale x 1 x float>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP8]], <vscale x 1 x float>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg8ei64_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlseg.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlseg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlseg.c
@@ -6487,7319 +6487,31 @@
   return vlseg2e64_v_f64m4(v0, v1, base, vl);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8mf8_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg2.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg2.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:
[[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t vl) { - return vlseg5e8_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// 
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg6e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t vl) {
- return vlseg6e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8mf8_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg7e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t vl) {
- return vlseg7e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8mf8_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg8e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t vl) {
- return vlseg8e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t vl) {
- return vlseg2e8_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t vl) {
- return vlseg3e8_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t vl) {
- return vlseg4e8_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg5e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t vl) {
- return vlseg5e8_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg6e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t vl) {
- return vlseg6e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg7e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t vl) {
- return vlseg7e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg8e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t vl) {
- return vlseg8e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t vl) {
- return vlseg2e8_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t vl) {
- return vlseg3e8_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t vl) {
- return vlseg4e8_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg5e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t vl) {
- return vlseg5e8_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg6e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t vl) {
- return vlseg6e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg7e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t vl) {
- return vlseg7e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg8e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t vl) {
- return vlseg8e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t vl) {
- return vlseg2e8_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t vl) {
- return vlseg3e8_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t vl) {
- return vlseg4e8_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg5e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t vl) {
- return vlseg5e8_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg6e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t vl) {
- return vlseg6e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg7e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t vl) {
- return vlseg7e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
vint8m1_t maskedoff7, const int8_t *base, size_t vl) { - return vlseg8e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv32i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e8_v_i8m4_m (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: 
ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t vl) { - return vlseg3e16_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t vl) { - return vlseg4e16_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e16_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t vl) { - return vlseg5e16_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e16_v_i16mf4_m( -// 
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t vl) { - return vlseg6e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e16_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t vl) { - return vlseg7e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e16_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * 
[[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t vl) { - return vlseg8e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t vl) { - return vlseg3e16_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } 
[[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t vl) { - return vlseg4e16_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e16_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t vl) { - return vlseg5e16_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e16_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t vl) { - return vlseg6e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e16_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: 
[[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t vl) { - return vlseg7e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e16_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: 
ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t vl) { - return vlseg8e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t vl) { - return vlseg3e16_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t 
vl) { - return vlseg4e16_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e16_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t vl) { - return vlseg5e16_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e16_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store 
[[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t vl) { - return vlseg6e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e16_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t vl) { - return vlseg7e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e16_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], 
* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg8e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t vl) {
- return vlseg8e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t vl) {
- return vlseg2e16_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t vl) {
- return vlseg3e16_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t vl) {
- return vlseg4e16_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16m4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e16_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t vl) {
- return vlseg2e16_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t vl) {
- return vlseg2e32_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e32_v_i32mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t vl) {
- return vlseg3e32_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e32_v_i32mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t vl) {
- return vlseg4e32_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e32_v_i32mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg5e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t vl) {
- return vlseg5e32_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e32_v_i32mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg6e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t vl) {
- return vlseg6e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e32_v_i32mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg7e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask,
vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t vl) { - return vlseg7e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e32_v_i32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e32_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t 
*v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t vl) { - return vlseg8e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t vl) { - return vlseg2e32_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e32_v_i32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t vl) { - return vlseg3e32_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e32_v_i32m1_m( -// CHECK-RV32-NEXT: 
entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t vl) { - return vlseg4e32_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e32_v_i32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e32_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t vl) { - return vlseg5e32_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e32_v_i32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e32_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t 
*base, size_t vl) { - return vlseg6e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e32_v_i32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e32_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t vl) { - return vlseg7e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e32_v_i32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vlseg8.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e32_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t vl) { - return vlseg8e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32m2_m( -// 
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t vl) { - return vlseg2e32_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e32_v_i32m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t vl) { - return vlseg3e32_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e32_v_i32m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t vl) { - return vlseg4e32_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e32_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t vl) { - return vlseg2e32_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e64_v_i64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t vl) { - return vlseg2e64_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e64_v_i64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t vl) { - return vlseg3e64_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e64_v_i64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], 
align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t vl) { - return vlseg4e64_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e64_v_i64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e64_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t vl) { - return vlseg5e64_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e64_v_i64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e64_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t vl) { - return vlseg6e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e64_v_i64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// 
CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e64_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t vl) { - return vlseg7e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e64_v_i64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV32-NEXT: 
[[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e64_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t vl) { - return vlseg8e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e64_v_i64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t vl) { - return vlseg2e64_v_i64m2_m(v0, v1, mask, maskedoff0, 
maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e64_v_i64m2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlseg3.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlseg3.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg3e64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t vl) {
-  return vlseg3e64_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e64_v_i64m2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlseg4.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], <vscale x 2 x i64> [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64>* [[V3:%.*]], align 8
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlseg4.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], <vscale x 2 x i64> [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64>* [[V3:%.*]], align 8
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg4e64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t vl) {
-  return vlseg4e64_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e64_v_i64m4_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlseg2.mask.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 4 x i64> [[TMP1]], <vscale x 4 x i64>* [[V0:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64>* [[V1:%.*]], align 8
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlseg2.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP1]], <vscale x 4 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg2e64_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t vl) {
-  return vlseg2e64_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8mf8_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg2.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg2.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg2e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t vl) {
-  return vlseg2e8_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8mf8_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg3.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-//
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg3.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg3e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t vl) {
-  return vlseg3e8_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8mf8_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg4.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg4.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg4e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t vl) {
-  return vlseg4e8_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8mf8_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg5.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-//
CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t vl) { - return vlseg5e8_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t vl) { - return vlseg6e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t vl) { - return vlseg7e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t vl) { - return vlseg8e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t vl) { - return vlseg3e8_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// 
CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t vl) { - return vlseg5e8_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t vl) { - return vlseg6e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, 
vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t vl) { - return vlseg7e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, 
vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t vl) { - return vlseg8e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t vl) { - return vlseg3e8_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); -} - -// CHECK-RV32-LABEL: 
@test_vlseg4e8_v_u8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t vl) { - return vlseg5e8_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, 
vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t vl) { - return vlseg6e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t vl) { - return vlseg7e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t vl) { - return vlseg8e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); -} - -// 
CHECK-RV32-LABEL: @test_vlseg2e8_v_u8m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlseg2.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlseg2.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg2e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t vl) {
-  return vlseg2e8_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlseg3.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlseg3.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg3e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t vl) {
-  return vlseg3e8_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlseg4.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-//
CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e8_v_u8m1_m 
(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t vl) { - return vlseg5e8_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t vl) { - return vlseg6e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) 
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t vl) { - return vlseg7e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t vl) { - return vlseg8e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlseg2.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg2e8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t vl) {
-  return vlseg2e8_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8m2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlseg3.mask.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlseg3.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg3e8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t vl) {
-  return vlseg3e8_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8m2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlseg4.mask.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], <vscale x 16 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8>* [[V3:%.*]], align 1
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlseg4.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], <vscale x 16 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] =
extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv32i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e8_v_u8m4_m (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: 
@test_vlseg3e16_v_u16mf4_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlseg3.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlseg3.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg3e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t vl) {
-  return vlseg3e16_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e16_v_u16mf4_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlseg4.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlseg4.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg4e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0,
vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t vl) { - return vlseg4e16_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e16_v_u16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t vl) { - return vlseg5e16_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e16_v_u16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * 
[[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t vl) { - return vlseg6e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e16_v_u16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t vl) { - return vlseg7e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e16_v_u16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], 
[[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t vl) { - return vlseg8e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e16_v_u16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlseg3.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg3e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t vl) {
-  return vlseg3e16_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e16_v_u16mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlseg4.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlseg4.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg4e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t vl) {
-  return vlseg4e16_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e16_v_u16mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> }
@llvm.riscv.vlseg5.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t vl) { - return vlseg5e16_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e16_v_u16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: 
@test_vlseg6e16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t vl) { - return vlseg6e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e16_v_u16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t vl) { - return vlseg7e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e16_v_u16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 
2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t vl) { - return vlseg8e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e16_v_u16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m1_m( 
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlseg3.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg3e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t vl) {
-  return vlseg3e16_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e16_v_u16m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlseg4.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlseg4.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg4e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t vl) {
-  return vlseg4e16_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e16_v_u16m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlseg5.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlseg5.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg5e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t vl) {
-  return vlseg5e16_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e16_v_u16m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlseg6.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 5
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlseg6.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg6e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t vl) {
-  return vlseg6e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e16_v_u16m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlseg7.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], <vscale x 4 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 5
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 6
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP7]], <vscale x 4 x i16>* [[V6:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlseg7.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], <vscale x 4 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t vl) { - return vlseg7e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e16_v_u16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { 
, , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t vl) { - return vlseg8e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e16_v_u16m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t vl) { - return vlseg3e16_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e16_v_u16m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t vl) { - return vlseg4e16_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: 
store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e16_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e32_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t vl) { - return vlseg3e32_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e32_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i32( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t vl) { - return vlseg4e32_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e32_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t vl) { - return vlseg5e32_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e32_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t vl) { - 
return vlseg6e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e32_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t vl) { - return vlseg7e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e32_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vlseg8.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t vl) { - return vlseg8e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); -} - -// CHECK-RV32-LABEL: 
@test_vlseg2e32_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e32_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t vl) { - return vlseg3e32_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e32_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t vl) { - return vlseg4e32_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e32_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e32_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: 
ret void -// -void test_vlseg5e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t vl) { - return vlseg5e32_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e32_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e32_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t vl) { - return vlseg6e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e32_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e32_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t vl) { - return vlseg7e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e32_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , 
, , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e32_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t vl) { - return vlseg8e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e32_v_u32m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t vl) { - return vlseg3e32_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e32_v_u32m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vlseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t vl) { - return vlseg4e32_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e32_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e64_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e64_v_u64m1_m (vuint64m1_t 
*v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t vl) { - return vlseg2e64_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e64_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t vl) { - return vlseg3e64_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e64_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store 
[[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t vl) { - return vlseg4e64_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e64_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e64_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t vl) { - return vlseg5e64_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e64_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , 
} [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e64_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t vl) { - return vlseg6e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e64_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: 
@test_vlseg7e64_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t vl) { - return vlseg7e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e64_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e64_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vlseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t vl) { - return vlseg8e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e64_v_u64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t vl) { - return vlseg2e64_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e64_v_u64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* 
[[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t vl) { - return vlseg3e64_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e64_v_u64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t vl) { - return vlseg4e64_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); -} - -// 
CHECK-RV32-LABEL: @test_vlseg2e64_v_u64m4_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlseg2.mask.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 4 x i64> [[TMP1]], <vscale x 4 x i64>* [[V0:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64>* [[V1:%.*]], align 8
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlseg2.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP1]], <vscale x 4 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg2e64_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t vl) {
-  return vlseg2e64_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlseg2.mask.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlseg2.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg2e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t vl) {
-  return vlseg2e32_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e32_v_f32mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlseg3.mask.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlseg3.mask.nxv1f32.i64(
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t vl) { - return vlseg3e32_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e32_v_f32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t vl) { - return vlseg4e32_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e32_v_f32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// 
CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t vl) { - return vlseg5e32_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e32_v_f32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t vl) { - return vlseg6e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e32_v_f32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// 
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t vl) { - return vlseg7e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e32_v_f32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t vl) { - return vlseg8e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t vl) { - return vlseg2e32_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e32_v_f32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } 
[[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t vl) { - return vlseg3e32_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e32_v_f32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t vl) { - return vlseg4e32_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e32_v_f32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// 
CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e32_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t vl) { - return vlseg5e32_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e32_v_f32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e32_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t vl) { - return vlseg6e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e32_v_f32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e32_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// 
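For reference between these autogenerated checks, a masked segment load of this family is typically used to deinterleave structure-of-arrays data under a mask. The sketch below uses vlseg2e32_v_f32m1_m with the exact signature tested in this file; it is a minimal illustration only, assuming <riscv_vector.h> and the pre-1.0 intrinsic naming used here, and the names deinterleave_xy_masked, x, y, xy, and n are illustrative rather than part of the test.

#include <riscv_vector.h>
#include <stddef.h>

/* Minimal usage sketch (not part of the test): deinterleave {x,y} pairs
   from xy under a caller-supplied mask. Inactive lanes keep the maskedoff
   operands, here the previous contents of x[] and y[]. Assumes the same
   mask is meant to apply to each vl-element strip. */
void deinterleave_xy_masked(float *x, float *y, const float *xy,
                            vbool32_t mask, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e32m1(n - i);
    vfloat32m1_t vx = vle32_v_f32m1(x + i, vl); /* maskedoff0 */
    vfloat32m1_t vy = vle32_v_f32m1(y + i, vl); /* maskedoff1 */
    /* Segment load: fields 0 and 1 of each pair land in vx and vy. */
    vlseg2e32_v_f32m1_m(&vx, &vy, mask, vx, vy, xy + 2 * i, vl);
    vse32_v_f32m1(x + i, vx, vl);
    vse32_v_f32m1(y + i, vy, vl);
    i += vl;
  }
}
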
-void test_vlseg7e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t vl) {
- return vlseg7e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e32_v_f32m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e32_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg8e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t vl) {
- return vlseg8e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t vl) {
- return vlseg2e32_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e32_v_f32m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t vl) {
- return vlseg3e32_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e32_v_f32m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t vl) {
- return vlseg4e32_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32m4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e32_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t vl) {
- return vlseg2e32_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e64_v_f64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t vl) {
- return vlseg2e64_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e64_v_f64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t vl) {
- return vlseg3e64_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e64_v_f64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] =
extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t vl) { - return vlseg4e64_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e64_v_f64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e64_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } 
[[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t vl) { - return vlseg5e64_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e64_v_f64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e64_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t vl) { - return vlseg6e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e64_v_f64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vlseg7.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e64_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t vl) { - return vlseg7e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e64_v_f64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e64_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t vl) { - return vlseg8e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e64_v_f64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } 
[[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t vl) { - return vlseg2e64_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e64_v_f64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t vl) { - return vlseg3e64_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e64_v_f64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], 
align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t vl) { - return vlseg4e64_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e64_v_f64m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e64_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t vl) { - return vlseg2e64_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e16_v_f16mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret 
void -// -void test_vlseg2e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16mf4(v0, v1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e16_v_f16mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16mf4(v0, v1, v2, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e16_v_f16mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16mf4(v0, v1, v2, v3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e16_v_f16mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, size_t vl) { - return vlseg5e16_v_f16mf4(v0, v1, v2, v3, v4, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e16_v_f16mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, size_t vl) { - return vlseg6e16_v_f16mf4(v0, v1, v2, v3, v4, v5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e16_v_f16mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, size_t vl) { - return vlseg7e16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e16_v_f16mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// 
CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, size_t vl) { - return vlseg8e16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e16_v_f16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e16_v_f16mf2 (vfloat16mf2_t *v0, 
vfloat16mf2_t *v1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16mf2(v0, v1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e16_v_f16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16mf2(v0, v1, v2, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e16_v_f16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16mf2(v0, v1, v2, v3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e16_v_f16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, size_t vl) { - return vlseg5e16_v_f16mf2(v0, v1, v2, v3, v4, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e16_v_f16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], 
align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, size_t vl) { - return vlseg6e16_v_f16mf2(v0, v1, v2, v3, v4, v5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e16_v_f16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, size_t vl) { - return vlseg7e16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e16_v_f16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// 
CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, size_t vl) { - return vlseg8e16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e16_v_f16m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, size_t vl) { - return 
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e16_v_f16m1(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, size_t vl) {
- return vlseg3e16_v_f16m1(v0, v1, v2, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e16_v_f16m1(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, size_t vl) {
- return vlseg4e16_v_f16m1(v0, v1, v2, v3, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e16_v_f16m1(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg5e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, size_t vl) {
- return vlseg5e16_v_f16m1(v0, v1, v2, v3, v4, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e16_v_f16m1(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg6e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, size_t vl) {
- return vlseg6e16_v_f16m1(v0, v1, v2, v3, v4, v5, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e16_v_f16m1(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg7e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, size_t vl) {
- return vlseg7e16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e16_v_f16m1(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg8e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, size_t vl) {
- return vlseg8e16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e16_v_f16m2(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, size_t vl) {
- return vlseg2e16_v_f16m2(v0, v1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e16_v_f16m2(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, size_t vl) {
- return vlseg3e16_v_f16m2(v0, v1, v2, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e16_v_f16m2(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, size_t vl) {
- return vlseg4e16_v_f16m2(v0, v1, v2, v3, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e16_v_f16m4(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e16_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, size_t vl) {
- return vlseg2e16_v_f16m4(v0, v1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e16_v_f16mf4_m(
+// CHECK-RV32-LABEL: @test_vlseg2e16_v_f16mf4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, size_t vl) {
- return vlseg2e16_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg2e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, size_t vl) {
+ return vlseg2e16_v_f16mf4(v0, v1, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg3e16_v_f16mf4_m(
+// CHECK-RV32-LABEL: @test_vlseg3e16_v_f16mf4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -13808,9 +6520,9 @@
 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -13819,13 +6531,13 @@
 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg3e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, size_t vl) {
- return vlseg3e16_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+void test_vlseg3e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, size_t vl) {
+ return vlseg3e16_v_f16mf4(v0, v1, v2, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg4e16_v_f16mf4_m(
+// CHECK-RV32-LABEL: @test_vlseg4e16_v_f16mf4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -13836,9 +6548,9 @@
 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -13849,13 +6561,13 @@
 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg4e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, size_t vl) {
- return vlseg4e16_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+void test_vlseg4e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, size_t vl) {
+ return vlseg4e16_v_f16mf4(v0, v1, v2, v3, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg5e16_v_f16mf4_m(
+// CHECK-RV32-LABEL: @test_vlseg5e16_v_f16mf4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -13868,9 +6580,9 @@
 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -13883,13 +6595,13 @@
 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg5e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, size_t vl) {
- return vlseg5e16_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+void test_vlseg5e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, size_t vl) {
+ return vlseg5e16_v_f16mf4(v0, v1, v2, v3, v4, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg6e16_v_f16mf4_m(
+// CHECK-RV32-LABEL: @test_vlseg6e16_v_f16mf4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -13904,9 +6616,9 @@
 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -13921,13 +6633,13 @@
 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg6e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, size_t vl) {
- return vlseg6e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+void test_vlseg6e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, size_t vl) {
+ return vlseg6e16_v_f16mf4(v0, v1, v2, v3, v4, v5, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg7e16_v_f16mf4_m(
+// CHECK-RV32-LABEL: @test_vlseg7e16_v_f16mf4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -13944,9 +6656,9 @@
 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -13963,13 +6675,13 @@
 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg7e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, size_t vl) {
- return vlseg7e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+void test_vlseg7e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, size_t vl) {
+ return vlseg7e16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg8e16_v_f16mf4_m(
+// CHECK-RV32-LABEL: @test_vlseg8e16_v_f16mf4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -13988,9 +6700,9 @@
 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -14009,35 +6721,35 @@
 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg8e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, size_t vl) {
- return vlseg8e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+void test_vlseg8e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, size_t vl) {
+ return vlseg8e16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg2e16_v_f16mf2_m(
+// CHECK-RV32-LABEL: @test_vlseg2e16_v_f16mf2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, size_t vl) {
- return vlseg2e16_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg2e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, size_t vl) {
+ return vlseg2e16_v_f16mf2(v0, v1, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg3e16_v_f16mf2_m(
+// CHECK-RV32-LABEL: @test_vlseg3e16_v_f16mf2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -14046,9 +6758,9 @@
 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -14057,13 +6769,13 @@
 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg3e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, size_t vl) {
- return vlseg3e16_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+void test_vlseg3e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, size_t vl) {
+ return vlseg3e16_v_f16mf2(v0, v1, v2, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg4e16_v_f16mf2_m(
+// CHECK-RV32-LABEL: @test_vlseg4e16_v_f16mf2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -14074,9 +6786,9 @@
 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -14087,13 +6799,13 @@
 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg4e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, size_t vl) {
- return vlseg4e16_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+void test_vlseg4e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, size_t vl) {
+ return vlseg4e16_v_f16mf2(v0, v1, v2, v3, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg5e16_v_f16mf2_m(
+// CHECK-RV32-LABEL: @test_vlseg5e16_v_f16mf2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -14106,9 +6818,9 @@
 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -14121,13 +6833,13 @@
 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg5e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, size_t vl) {
- return vlseg5e16_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+void test_vlseg5e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, size_t vl) {
+ return vlseg5e16_v_f16mf2(v0, v1, v2, v3, v4, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg6e16_v_f16mf2_m(
+// CHECK-RV32-LABEL: @test_vlseg6e16_v_f16mf2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -14142,9 +6854,9 @@
 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -14159,13 +6871,13 @@
 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg6e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, size_t vl) {
- return vlseg6e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+void test_vlseg6e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, size_t vl) {
+ return vlseg6e16_v_f16mf2(v0, v1, v2, v3, v4, v5, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg7e16_v_f16mf2_m(
+// CHECK-RV32-LABEL: @test_vlseg7e16_v_f16mf2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -14182,9 +6894,9 @@
 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -14201,13 +6913,13 @@
 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg7e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, size_t vl) {
- return vlseg7e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+void test_vlseg7e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, size_t vl) {
+ return vlseg7e16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg8e16_v_f16mf2_m(
+// CHECK-RV32-LABEL: @test_vlseg8e16_v_f16mf2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -14226,9 +6938,9 @@
 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -14247,35 +6959,35 @@
 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg8e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, size_t vl) {
- return vlseg8e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+void test_vlseg8e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, size_t vl) {
+ return vlseg8e16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg2e16_v_f16m1_m(
+// CHECK-RV32-LABEL: @test_vlseg2e16_v_f16m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, size_t vl) {
- return vlseg2e16_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg2e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, size_t vl) {
+ return vlseg2e16_v_f16m1(v0, v1, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg3e16_v_f16m1_m(
+// CHECK-RV32-LABEL: @test_vlseg3e16_v_f16m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -14284,9 +6996,9 @@
 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -14295,13 +7007,13 @@
 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg3e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, size_t vl) {
- return vlseg3e16_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+void test_vlseg3e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, size_t vl) {
+ return vlseg3e16_v_f16m1(v0, v1, v2, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg4e16_v_f16m1_m(
+// CHECK-RV32-LABEL: @test_vlseg4e16_v_f16m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -14312,9 +7024,9 @@
 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -14325,13 +7037,13 @@
 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg4e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, size_t vl) {
- return vlseg4e16_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+void test_vlseg4e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, size_t vl) {
+ return vlseg4e16_v_f16m1(v0, v1, v2, v3, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg5e16_v_f16m1_m(
+// CHECK-RV32-LABEL: @test_vlseg5e16_v_f16m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -14344,9 +7056,9 @@
 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -14359,13 +7071,13 @@
 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg5e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, size_t vl) {
- return vlseg5e16_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+void test_vlseg5e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, size_t vl) {
+ return vlseg5e16_v_f16m1(v0, v1, v2, v3, v4, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg6e16_v_f16m1_m(
+// CHECK-RV32-LABEL: @test_vlseg6e16_v_f16m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -14380,9 +7092,9 @@
 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -14397,13 +7109,13 @@
 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg6e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, size_t vl) {
- return vlseg6e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+void test_vlseg6e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, size_t vl) {
(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, size_t vl) { + return vlseg6e16_v_f16m1(v0, v1, v2, v3, v4, v5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e16_v_f16m1_m( +// CHECK-RV32-LABEL: @test_vlseg7e16_v_f16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -14420,9 +7132,9 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -14439,13 +7151,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, size_t vl) { - return vlseg7e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, size_t vl) { + return vlseg7e16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e16_v_f16m1_m( +// CHECK-RV32-LABEL: @test_vlseg8e16_v_f16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -14464,9 +7176,9 @@ // CHECK-RV32-NEXT: 
store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -14485,35 +7197,35 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, size_t vl) { - return vlseg8e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, size_t vl) { + return vlseg8e16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_f16m2_m( +// CHECK-RV32-LABEL: @test_vlseg2e16_v_f16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, 
const _Float16 *base, size_t vl) { + return vlseg2e16_v_f16m2(v0, v1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e16_v_f16m2_m( +// CHECK-RV32-LABEL: @test_vlseg3e16_v_f16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -14522,9 +7234,9 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -14533,13 +7245,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, size_t vl) { + return vlseg3e16_v_f16m2(v0, v1, v2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e16_v_f16m2_m( +// CHECK-RV32-LABEL: @test_vlseg4e16_v_f16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -14550,9 +7262,9 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -14563,28 +7275,28 @@ // CHECK-RV64-NEXT: store 
[[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, size_t vl) { + return vlseg4e16_v_f16m2(v0, v1, v2, v3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_f16m4_m( +// CHECK-RV32-LABEL: @test_vlseg2e16_v_f16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m4_m( +// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_f16m4_m (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, size_t vl) { + return vlseg2e16_v_f16m4(v0, v1, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlseg_mask.c copy from clang/test/CodeGen/RISCV/rvv-intrinsics/vlseg.c copy to clang/test/CodeGen/RISCV/rvv-intrinsics/vlseg_mask.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlseg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlseg_mask.c @@ -13,31 +13,31 @@ #include -// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8mf8( +// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf8( +// CHECK-RV64-LABEL: 
@test_vlseg2e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8mf8(v0, v1, base, vl); +void test_vlseg2e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t vl) { + return vlseg2e8_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8mf8( +// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -46,9 +46,9 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf8( +// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -57,13 +57,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8mf8(v0, v1, v2, base, vl); +void test_vlseg3e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t vl) { + return vlseg3e8_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8mf8( +// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , } [[TMP0]], 1 @@ -74,9 +74,9 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf8( +// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -87,13 +87,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8mf8(v0, v1, v2, v3, base, vl); +void test_vlseg4e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t vl) { + return vlseg4e8_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8mf8( +// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -106,9 +106,9 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf8( +// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -121,13 +121,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, size_t vl) { - return vlseg5e8_v_i8mf8(v0, v1, v2, v3, v4, base, vl); +void test_vlseg5e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t vl) { + return vlseg5e8_v_i8mf8_m(v0, v1, v2, v3, v4, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8mf8( +// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -142,9 +142,9 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf8( +// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -159,13 +159,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, size_t vl) { - return vlseg6e8_v_i8mf8(v0, v1, v2, v3, v4, v5, base, vl); +void test_vlseg6e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t vl) { + return vlseg6e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8mf8( +// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -182,9 +182,9 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf8( +// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vlseg7.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -201,13 +201,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, size_t vl) { - return vlseg7e8_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, vl); +void test_vlseg7e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t vl) { + return vlseg7e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8mf8( +// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -226,9 +226,9 @@ // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf8( +// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -247,35 +247,35 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, size_t vl) { - return vlseg8e8_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +void test_vlseg8e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t 
maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t vl) { + return vlseg8e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8mf4( +// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf4( +// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8mf4(v0, v1, base, vl); +void test_vlseg2e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t vl) { + return vlseg2e8_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8mf4( +// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -284,9 +284,9 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf4( +// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -295,13 +295,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // 
CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8mf4(v0, v1, v2, base, vl); +void test_vlseg3e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t vl) { + return vlseg3e8_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8mf4( +// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -312,9 +312,9 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf4( +// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -325,13 +325,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8mf4(v0, v1, v2, v3, base, vl); +void test_vlseg4e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t vl) { + return vlseg4e8_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8mf4( +// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -344,9 +344,9 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf4( +// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i8.i64(i8* 
[[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -359,13 +359,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, size_t vl) { - return vlseg5e8_v_i8mf4(v0, v1, v2, v3, v4, base, vl); +void test_vlseg5e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t vl) { + return vlseg5e8_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8mf4( +// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -380,9 +380,9 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf4( +// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -397,13 +397,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, size_t vl) { - return vlseg6e8_v_i8mf4(v0, v1, v2, v3, v4, v5, base, vl); +void test_vlseg6e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t vl) { + return vlseg6e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV32-LABEL: 
@test_vlseg7e8_v_i8mf4( +// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -420,9 +420,9 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf4( +// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -439,13 +439,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, size_t vl) { - return vlseg7e8_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, vl); +void test_vlseg7e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t vl) { + return vlseg7e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8mf4( +// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -464,9 +464,9 @@ // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf4( +// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -485,35 +485,35 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, size_t vl) { - return vlseg8e8_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +void test_vlseg8e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t vl) { + return vlseg8e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8mf2( +// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf2( +// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8mf2(v0, v1, base, vl); +void test_vlseg2e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t vl) { + return vlseg2e8_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8mf2( +// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i32( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -522,9 +522,9 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf2( +// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -533,13 +533,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8mf2(v0, v1, v2, base, vl); +void test_vlseg3e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t vl) { + return vlseg3e8_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8mf2( +// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -550,9 +550,9 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf2( +// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -563,13 +563,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8mf2(v0, v1, v2, v3, base, vl); +void test_vlseg4e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t 
vl) { + return vlseg4e8_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8mf2( +// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -582,9 +582,9 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf2( +// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -597,13 +597,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, size_t vl) { - return vlseg5e8_v_i8mf2(v0, v1, v2, v3, v4, base, vl); +void test_vlseg5e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t vl) { + return vlseg5e8_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8mf2( +// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -618,9 +618,9 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf2( +// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -635,13 +635,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, size_t vl) { - return vlseg6e8_v_i8mf2(v0, v1, v2, v3, v4, v5, base, vl); +void test_vlseg6e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t vl) { + return vlseg6e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8mf2( +// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -658,9 +658,9 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf2( +// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -677,13 +677,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, size_t vl) { - return vlseg7e8_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, vl); +void test_vlseg7e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t vl) { + return vlseg7e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// 
CHECK-RV32-LABEL: @test_vlseg8e8_v_i8mf2( +// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -702,9 +702,9 @@ // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf2( +// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -723,35 +723,35 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, size_t vl) { - return vlseg8e8_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +void test_vlseg8e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t vl) { + return vlseg8e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8m1( +// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m1( +// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vlseg2.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
 // CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e8_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, size_t vl) {
- return vlseg2e8_v_i8m1(v0, v1, base, vl);
+void test_vlseg2e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t vl) {
+ return vlseg2e8_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8m1(
+// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlseg3.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlseg3.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
 // CHECK-RV32-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
@@ -760,9 +760,9 @@
 // CHECK-RV32-NEXT: store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m1(
+// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlseg3.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlseg3.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
@@ -771,13 +771,13 @@
 // CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg3e8_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, size_t vl) {
- return vlseg3e8_v_i8m1(v0, v1, v2, base, vl);
+void test_vlseg3e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t vl) {
+ return vlseg3e8_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8m1(
+// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlseg4.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlseg4.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
 // CHECK-RV32-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
@@ -788,9 +788,9 @@
 // CHECK-RV32-NEXT: store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m1(
+// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m1_m(
 //
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -801,13 +801,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8m1(v0, v1, v2, v3, base, vl); +void test_vlseg4e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t vl) { + return vlseg4e8_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8m1( +// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -820,9 +820,9 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8m1( +// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -835,13 +835,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, size_t vl) { - return vlseg5e8_v_i8m1(v0, v1, v2, v3, v4, base, vl); +void test_vlseg5e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t vl) { + return vlseg5e8_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8m1( +// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv8i8.i32(i8* 
[[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -856,9 +856,9 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8m1( +// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -873,13 +873,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, size_t vl) { - return vlseg6e8_v_i8m1(v0, v1, v2, v3, v4, v5, base, vl); +void test_vlseg6e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t vl) { + return vlseg6e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8m1( +// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -896,9 +896,9 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8m1( +// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -915,13 +915,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, size_t vl) { - return vlseg7e8_v_i8m1(v0, v1, v2, v3, v4, v5, v6, base, vl); +void test_vlseg7e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t vl) { + return vlseg7e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8m1( +// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -940,9 +940,9 @@ // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8m1( +// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -961,35 +961,35 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, size_t vl) { - return vlseg8e8_v_i8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +void test_vlseg8e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t vl) { + return vlseg8e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV32-LABEL: 
@test_vlseg2e8_v_i8m2(
+// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlseg2.nxv16i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlseg2.mask.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
 // CHECK-RV32-NEXT: store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
 // CHECK-RV32-NEXT: store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m2(
+// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlseg2.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlseg2.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
 // CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e8_v_i8m2 (vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, size_t vl) {
- return vlseg2e8_v_i8m2(v0, v1, base, vl);
+void test_vlseg2e8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t vl) {
+ return vlseg2e8_v_i8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8m2(
+// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlseg3.nxv16i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlseg3.mask.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
 // CHECK-RV32-NEXT: store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
@@ -998,9 +998,9 @@
 // CHECK-RV32-NEXT: store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m2(
+// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlseg3.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlseg3.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
@@ -1009,13 +1009,13 @@
 // CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg3e8_v_i8m2 (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const int8_t *base, size_t vl) {
- return vlseg3e8_v_i8m2(v0, v1, v2, base, vl);
+void test_vlseg3e8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t vl) {
+ return vlseg3e8_v_i8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8m2(
+// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlseg4.nxv16i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlseg4.mask.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], <vscale x 16 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
 // CHECK-RV32-NEXT: store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
@@ -1026,9 +1026,9 @@
 // CHECK-RV32-NEXT: store <vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8>* [[V3:%.*]], align 1
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m2(
+// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlseg4.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlseg4.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], <vscale x 16 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
@@ -1039,57 +1039,57 @@
 // CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8>* [[V3:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg4e8_v_i8m2 (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, const int8_t *base, size_t vl) {
- return vlseg4e8_v_i8m2(v0, v1, v2, v3, base, vl);
+void test_vlseg4e8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t vl) {
+ return vlseg4e8_v_i8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8m4(
+// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8m4_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vlseg2.nxv32i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vlseg2.mask.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 0
 // CHECK-RV32-NEXT: store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 1
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 1
 // CHECK-RV32-NEXT: store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 1
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m4(
+// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vlseg2.nxv32i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vlseg2.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT: store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 1
 // CHECK-RV64-NEXT: store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e8_v_i8m4 (vint8m4_t *v0, vint8m4_t *v1, const int8_t *base, size_t vl) {
- return vlseg2e8_v_i8m4(v0, v1, base, vl);
+void test_vlseg2e8_v_i8m4_m (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t vl) {
+ return vlseg2e8_v_i8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16mf4(
+// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16mf4_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlseg2.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlseg2.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
 // CHECK-RV32-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
 // CHECK-RV32-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlseg2.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlseg2.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
 // CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, size_t vl) {
- return vlseg2e16_v_i16mf4(v0, v1, base, vl);
+void test_vlseg2e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t vl) {
+ return vlseg2e16_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16mf4(
+// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16mf4_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlseg3.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlseg3.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
 // CHECK-RV32-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
@@ -1098,9 +1098,9 @@
 // CHECK-RV32-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlseg3.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlseg3.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
@@ -1109,13 +1109,13 @@
 // CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>
* [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, size_t vl) { - return vlseg3e16_v_i16mf4(v0, v1, v2, base, vl); +void test_vlseg3e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t vl) { + return vlseg3e16_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16mf4( +// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1126,9 +1126,9 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf4( +// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1139,13 +1139,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, size_t vl) { - return vlseg4e16_v_i16mf4(v0, v1, v2, v3, base, vl); +void test_vlseg4e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t vl) { + return vlseg4e16_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e16_v_i16mf4( +// CHECK-RV32-LABEL: @test_vlseg5e16_v_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1158,9 +1158,9 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf4( +// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1173,13 +1173,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, size_t vl) { - return vlseg5e16_v_i16mf4(v0, v1, v2, v3, v4, base, vl); +void test_vlseg5e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t vl) { + return vlseg5e16_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e16_v_i16mf4( +// CHECK-RV32-LABEL: @test_vlseg6e16_v_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1194,9 +1194,9 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf4( +// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1211,13 +1211,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, size_t vl) { - return vlseg6e16_v_i16mf4(v0, v1, v2, v3, v4, v5, base, vl); +void test_vlseg6e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t vl) { + return 
vlseg6e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e16_v_i16mf4( +// CHECK-RV32-LABEL: @test_vlseg7e16_v_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1234,9 +1234,9 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf4( +// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1253,13 +1253,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, size_t vl) { - return vlseg7e16_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, vl); +void test_vlseg7e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t vl) { + return vlseg7e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e16_v_i16mf4( +// CHECK-RV32-LABEL: @test_vlseg8e16_v_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1278,9 +1278,9 @@ // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: 
@test_vlseg8e16_v_i16mf4( +// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1299,35 +1299,35 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, size_t vl) { - return vlseg8e16_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +void test_vlseg8e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t vl) { + return vlseg8e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16mf2( +// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf2( +// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16mf2(v0, v1, base, vl); +void test_vlseg2e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t vl) { + return vlseg2e16_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: 
@test_vlseg3e16_v_i16mf2( +// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1336,9 +1336,9 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf2( +// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1347,13 +1347,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, size_t vl) { - return vlseg3e16_v_i16mf2(v0, v1, v2, base, vl); +void test_vlseg3e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t vl) { + return vlseg3e16_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16mf2( +// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1364,9 +1364,9 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf2( +// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1377,13 +1377,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_i16mf2 (vint16mf2_t *v0, 
vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, size_t vl) { - return vlseg4e16_v_i16mf2(v0, v1, v2, v3, base, vl); +void test_vlseg4e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t vl) { + return vlseg4e16_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e16_v_i16mf2( +// CHECK-RV32-LABEL: @test_vlseg5e16_v_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1396,9 +1396,9 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf2( +// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1411,13 +1411,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, size_t vl) { - return vlseg5e16_v_i16mf2(v0, v1, v2, v3, v4, base, vl); +void test_vlseg5e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t vl) { + return vlseg5e16_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e16_v_i16mf2( +// CHECK-RV32-LABEL: @test_vlseg6e16_v_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1432,9 +1432,9 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // 
CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf2( +// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1449,13 +1449,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, size_t vl) { - return vlseg6e16_v_i16mf2(v0, v1, v2, v3, v4, v5, base, vl); +void test_vlseg6e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t vl) { + return vlseg6e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e16_v_i16mf2( +// CHECK-RV32-LABEL: @test_vlseg7e16_v_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1472,9 +1472,9 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf2( +// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1491,13 +1491,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, size_t vl) { - return vlseg7e16_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, vl); +void 
test_vlseg7e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t vl) { + return vlseg7e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e16_v_i16mf2( +// CHECK-RV32-LABEL: @test_vlseg8e16_v_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1516,9 +1516,9 @@ // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf2( +// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1537,35 +1537,35 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, size_t vl) { - return vlseg8e16_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +void test_vlseg8e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t vl) { + return vlseg8e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16m1( +// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], 
<vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
 // CHECK-RV32-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
 // CHECK-RV32-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m1(
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlseg2.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlseg2.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
 // CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e16_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, size_t vl) {
- return vlseg2e16_v_i16m1(v0, v1, base, vl);
+void test_vlseg2e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t vl) {
+ return vlseg2e16_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16m1(
+// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlseg3.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlseg3.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
 // CHECK-RV32-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
@@ -1574,9 +1574,9 @@
 // CHECK-RV32-NEXT: store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m1(
+// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlseg3.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlseg3.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
@@ -1585,13 +1585,13 @@
 // CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg3e16_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, size_t vl) {
- return vlseg3e16_v_i16m1(v0, v1, v2, base, vl);
+void test_vlseg3e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t vl) {
+ return vlseg3e16_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16m1(
+// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> }
@llvm.riscv.vlseg4.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1602,9 +1602,9 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m1( +// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1615,13 +1615,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, size_t vl) { - return vlseg4e16_v_i16m1(v0, v1, v2, v3, base, vl); +void test_vlseg4e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t vl) { + return vlseg4e16_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e16_v_i16m1( +// CHECK-RV32-LABEL: @test_vlseg5e16_v_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1634,9 +1634,9 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16m1( +// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1649,13 +1649,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_i16m1 (vint16m1_t 
-  return vlseg5e16_v_i16m1(v0, v1, v2, v3, v4, base, vl);
+void test_vlseg5e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t vl) {
+  return vlseg5e16_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg6e16_v_i16m1(
+// CHECK-RV32-LABEL: @test_vlseg6e16_v_i16m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -1670,9 +1670,9 @@
 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16m1(
+// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -1687,13 +1687,13 @@
 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg6e16_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, size_t vl) {
-  return vlseg6e16_v_i16m1(v0, v1, v2, v3, v4, v5, base, vl);
+void test_vlseg6e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t vl) {
+  return vlseg6e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg7e16_v_i16m1(
+// CHECK-RV32-LABEL: @test_vlseg7e16_v_i16m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -1710,9 +1710,9 @@
 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16m1(
+// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -1729,13 +1729,13 @@
 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg7e16_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, size_t vl) {
-  return vlseg7e16_v_i16m1(v0, v1, v2, v3, v4, v5, v6, base, vl);
+void test_vlseg7e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t vl) {
+  return vlseg7e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg8e16_v_i16m1(
+// CHECK-RV32-LABEL: @test_vlseg8e16_v_i16m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -1754,9 +1754,9 @@
 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16m1(
+// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -1775,35 +1775,35 @@
 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg8e16_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, size_t vl) {
-  return vlseg8e16_v_i16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl);
+void test_vlseg8e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t vl) {
+  return vlseg8e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16m2(
+// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m2(
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e16_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, size_t vl) {
-  return vlseg2e16_v_i16m2(v0, v1, base, vl);
+void test_vlseg2e16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t vl) {
+  return vlseg2e16_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16m2(
+// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -1812,9 +1812,9 @@
 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m2(
+// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -1823,13 +1823,13 @@
 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg3e16_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, size_t vl) {
-  return vlseg3e16_v_i16m2(v0, v1, v2, base, vl);
+void test_vlseg3e16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t vl) {
+  return vlseg3e16_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16m2(
+// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -1840,9 +1840,9 @@
 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m2(
+// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -1853,57 +1853,57 @@
 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg4e16_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, size_t vl) {
-  return vlseg4e16_v_i16m2(v0, v1, v2, v3, base, vl);
+void test_vlseg4e16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t vl) {
+  return vlseg4e16_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16m4(
+// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16m4_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m4(
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e16_v_i16m4 (vint16m4_t *v0, vint16m4_t *v1, const int16_t *base, size_t vl) {
-  return vlseg2e16_v_i16m4(v0, v1, base, vl);
+void test_vlseg2e16_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t vl) {
+  return vlseg2e16_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32mf2(
+// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32mf2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, size_t vl) {
-  return vlseg2e32_v_i32mf2(v0, v1, base, vl);
+void test_vlseg2e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t vl) {
+  return vlseg2e32_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg3e32_v_i32mf2(
+// CHECK-RV32-LABEL: @test_vlseg3e32_v_i32mf2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -1912,9 +1912,9 @@
 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -1923,13 +1923,13 @@
 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg3e32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, size_t vl) {
-  return vlseg3e32_v_i32mf2(v0, v1, v2, base, vl);
+void test_vlseg3e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t vl) {
+  return vlseg3e32_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg4e32_v_i32mf2(
+// CHECK-RV32-LABEL: @test_vlseg4e32_v_i32mf2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -1940,9 +1940,9 @@
 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -1953,13 +1953,13 @@
 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg4e32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, size_t vl) {
-  return vlseg4e32_v_i32mf2(v0, v1, v2, v3, base, vl);
+void test_vlseg4e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t vl) {
+  return vlseg4e32_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg5e32_v_i32mf2(
+// CHECK-RV32-LABEL: @test_vlseg5e32_v_i32mf2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -1972,9 +1972,9 @@
 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg5e32_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vlseg5e32_v_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -1987,13 +1987,13 @@
 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg5e32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, size_t vl) {
-  return vlseg5e32_v_i32mf2(v0, v1, v2, v3, v4, base, vl);
+void test_vlseg5e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t vl) {
+  return vlseg5e32_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg6e32_v_i32mf2(
+// CHECK-RV32-LABEL: @test_vlseg6e32_v_i32mf2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -2008,9 +2008,9 @@
 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg6e32_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vlseg6e32_v_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -2025,13 +2025,13 @@
 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg6e32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, size_t vl) {
-  return vlseg6e32_v_i32mf2(v0, v1, v2, v3, v4, v5, base, vl);
+void test_vlseg6e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t vl) {
+  return vlseg6e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg7e32_v_i32mf2(
+// CHECK-RV32-LABEL: @test_vlseg7e32_v_i32mf2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -2048,9 +2048,9 @@
 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg7e32_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vlseg7e32_v_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -2067,13 +2067,13 @@
 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg7e32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, size_t vl) {
-  return vlseg7e32_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, vl);
+void test_vlseg7e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t vl) {
+  return vlseg7e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg8e32_v_i32mf2(
+// CHECK-RV32-LABEL: @test_vlseg8e32_v_i32mf2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -2092,9 +2092,9 @@
 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg8e32_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vlseg8e32_v_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -2113,35 +2113,35 @@
 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg8e32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, size_t vl) {
-  return vlseg8e32_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl);
+void test_vlseg8e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t vl) {
+  return vlseg8e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32m1(
+// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m1(
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e32_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, size_t vl) {
-  return vlseg2e32_v_i32m1(v0, v1, base, vl);
+void test_vlseg2e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t vl) {
+  return vlseg2e32_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg3e32_v_i32m1(
+// CHECK-RV32-LABEL: @test_vlseg3e32_v_i32m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -2150,9 +2150,9 @@
 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m1(
+// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -2161,13 +2161,13 @@
 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg3e32_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, size_t vl) {
-  return vlseg3e32_v_i32m1(v0, v1, v2, base, vl);
+void test_vlseg3e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t vl) {
+  return vlseg3e32_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg4e32_v_i32m1(
+// CHECK-RV32-LABEL: @test_vlseg4e32_v_i32m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -2178,9 +2178,9 @@
 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m1(
+// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -2191,13 +2191,13 @@
 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg4e32_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, size_t vl) {
-  return vlseg4e32_v_i32m1(v0, v1, v2, v3, base, vl);
+void test_vlseg4e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t vl) {
+  return vlseg4e32_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg5e32_v_i32m1(
+// CHECK-RV32-LABEL: @test_vlseg5e32_v_i32m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -2210,9 +2210,9 @@
 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg5e32_v_i32m1(
+// CHECK-RV64-LABEL: @test_vlseg5e32_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -2225,13 +2225,13 @@
 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg5e32_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, size_t vl) {
-  return vlseg5e32_v_i32m1(v0, v1, v2, v3, v4, base, vl);
+void test_vlseg5e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t vl) {
+  return vlseg5e32_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg6e32_v_i32m1(
+// CHECK-RV32-LABEL: @test_vlseg6e32_v_i32m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -2246,9 +2246,9 @@
 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg6e32_v_i32m1(
+// CHECK-RV64-LABEL: @test_vlseg6e32_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -2263,13 +2263,13 @@
 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg6e32_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, size_t vl) {
-  return vlseg6e32_v_i32m1(v0, v1, v2, v3, v4, v5, base, vl);
+void test_vlseg6e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t vl) {
+  return vlseg6e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg7e32_v_i32m1(
+// CHECK-RV32-LABEL: @test_vlseg7e32_v_i32m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -2286,9 +2286,9 @@
 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg7e32_v_i32m1(
+// CHECK-RV64-LABEL: @test_vlseg7e32_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -2305,13 +2305,13 @@
 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg7e32_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, size_t vl) {
-  return vlseg7e32_v_i32m1(v0, v1, v2, v3, v4, v5, v6, base, vl);
+void test_vlseg7e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t vl) {
+  return vlseg7e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg8e32_v_i32m1(
+// CHECK-RV32-LABEL: @test_vlseg8e32_v_i32m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -2330,9 +2330,9 @@
 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg8e32_v_i32m1(
+// CHECK-RV64-LABEL: @test_vlseg8e32_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -2351,35 +2351,35 @@
 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg8e32_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, size_t vl) {
-  return vlseg8e32_v_i32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl);
+void test_vlseg8e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t vl) {
+  return vlseg8e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32m2(
+// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m2(
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e32_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, size_t vl) {
-  return vlseg2e32_v_i32m2(v0, v1, base, vl);
+void test_vlseg2e32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t vl) {
+  return vlseg2e32_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg3e32_v_i32m2(
+// CHECK-RV32-LABEL: @test_vlseg3e32_v_i32m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -2388,9 +2388,9 @@
 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m2(
+// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -2399,13 +2399,13 @@
 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg3e32_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, size_t vl) {
-  return vlseg3e32_v_i32m2(v0, v1, v2, base, vl);
+void test_vlseg3e32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t vl) {
+  return vlseg3e32_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg4e32_v_i32m2(
+// CHECK-RV32-LABEL: @test_vlseg4e32_v_i32m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -2416,9 +2416,9 @@
 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m2(
+// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -2429,57 +2429,57 @@
 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg4e32_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, size_t vl) {
-  return vlseg4e32_v_i32m2(v0, v1, v2, v3, base, vl);
+void test_vlseg4e32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t vl) {
+  return vlseg4e32_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32m4(
+// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32m4_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m4(
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e32_v_i32m4 (vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, size_t vl) {
-  return vlseg2e32_v_i32m4(v0, v1, base, vl);
+void test_vlseg2e32_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t vl) {
+  return vlseg2e32_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg2e64_v_i64m1(
+// CHECK-RV32-LABEL: @test_vlseg2e64_v_i64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m1(
+// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e64_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, size_t vl) {
-  return vlseg2e64_v_i64m1(v0, v1, base, vl);
+void test_vlseg2e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t vl) {
+  return vlseg2e64_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg3e64_v_i64m1(
+// CHECK-RV32-LABEL: @test_vlseg3e64_v_i64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -2488,9 +2488,9 @@
 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m1(
+// CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -2499,13 +2499,13 @@
 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg3e64_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, size_t vl) {
-  return vlseg3e64_v_i64m1(v0, v1, v2, base, vl);
+void test_vlseg3e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t vl) {
+  return vlseg3e64_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg4e64_v_i64m1(
+// CHECK-RV32-LABEL: @test_vlseg4e64_v_i64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -2516,9 +2516,9 @@
 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m1(
+// CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -2529,13 +2529,13 @@
 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg4e64_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, size_t vl) {
-  return vlseg4e64_v_i64m1(v0, v1, v2, v3, base, vl);
+void test_vlseg4e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t vl) {
+  return vlseg4e64_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg5e64_v_i64m1(
+// CHECK-RV32-LABEL: @test_vlseg5e64_v_i64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
, , } @llvm.riscv.vlseg5.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2548,9 +2548,9 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg5e64_v_i64m1( +// CHECK-RV64-LABEL: @test_vlseg5e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2563,13 +2563,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e64_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, size_t vl) { - return vlseg5e64_v_i64m1(v0, v1, v2, v3, v4, base, vl); +void test_vlseg5e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t vl) { + return vlseg5e64_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e64_v_i64m1( +// CHECK-RV32-LABEL: @test_vlseg6e64_v_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2584,9 +2584,9 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg6e64_v_i64m1( +// CHECK-RV64-LABEL: @test_vlseg6e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2601,13 +2601,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg6e64_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, size_t vl) {
- return vlseg6e64_v_i64m1(v0, v1, v2, v3, v4, v5, base, vl);
+void test_vlseg6e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t vl) {
+ return vlseg6e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg7e64_v_i64m1(
+// CHECK-RV32-LABEL: @test_vlseg7e64_v_i64m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -2624,9 +2624,9 @@
// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg7e64_v_i64m1(
+// CHECK-RV64-LABEL: @test_vlseg7e64_v_i64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -2643,13 +2643,13 @@
// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg7e64_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, size_t vl) {
- return vlseg7e64_v_i64m1(v0, v1, v2, v3, v4, v5, v6, base, vl);
+void test_vlseg7e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t vl) {
+ return vlseg7e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg8e64_v_i64m1(
+// CHECK-RV32-LABEL: @test_vlseg8e64_v_i64m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -2668,9 +2668,9 @@
// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 8
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg8e64_v_i64m1(
+// CHECK-RV64-LABEL: @test_vlseg8e64_v_i64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -2689,35 +2689,35 @@
// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg8e64_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, size_t vl) {
- return vlseg8e64_v_i64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl);
+void test_vlseg8e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t vl) {
+ return vlseg8e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg2e64_v_i64m2(
+// CHECK-RV32-LABEL: @test_vlseg2e64_v_i64m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m2(
+// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg2e64_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, size_t vl) {
- return vlseg2e64_v_i64m2(v0, v1, base, vl);
+void test_vlseg2e64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t vl) {
+ return vlseg2e64_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg3e64_v_i64m2(
+// CHECK-RV32-LABEL: @test_vlseg3e64_v_i64m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -2726,9 +2726,9 @@
// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m2(
+// CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -2737,13 +2737,13 @@
// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg3e64_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, size_t vl) {
- return vlseg3e64_v_i64m2(v0, v1, v2, base, vl);
+void test_vlseg3e64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t vl) {
+ return vlseg3e64_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg4e64_v_i64m2(
+// CHECK-RV32-LABEL: @test_vlseg4e64_v_i64m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -2754,9 +2754,9 @@
// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m2(
+// CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -2767,57 +2767,57 @@
// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg4e64_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, size_t vl) {
- return vlseg4e64_v_i64m2(v0, v1, v2, v3, base, vl);
+void test_vlseg4e64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t vl) {
+ return vlseg4e64_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg2e64_v_i64m4(
+// CHECK-RV32-LABEL: @test_vlseg2e64_v_i64m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m4(
+// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg2e64_v_i64m4 (vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, size_t vl) {
- return vlseg2e64_v_i64m4(v0, v1, base, vl);
+void test_vlseg2e64_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t vl) {
+ return vlseg2e64_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8mf8(
+// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8mf8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg2e8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, size_t vl) {
- return vlseg2e8_v_u8mf8(v0, v1, base, vl);
+void test_vlseg2e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t vl) {
+ return vlseg2e8_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8mf8(
+// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8mf8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -2826,9 +2826,9 @@
// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -2837,13 +2837,13 @@
// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg3e8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, size_t vl) {
- return vlseg3e8_v_u8mf8(v0, v1, v2, base, vl);
+void test_vlseg3e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t vl) {
+ return vlseg3e8_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8mf8(
+// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8mf8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -2854,9 +2854,9 @@
// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -2867,13 +2867,13 @@
// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg4e8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, size_t vl) {
- return vlseg4e8_v_u8mf8(v0, v1, v2, v3, base, vl);
+void test_vlseg4e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t vl) {
+ return vlseg4e8_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8mf8(
+// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8mf8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -2886,9 +2886,9 @@
// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -2901,13 +2901,13 @@
// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg5e8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, size_t vl) {
- return vlseg5e8_v_u8mf8(v0, v1, v2, v3, v4, base, vl);
+void test_vlseg5e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t vl) {
+ return vlseg5e8_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8mf8(
+// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8mf8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -2922,9 +2922,9 @@
// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -2939,13 +2939,13 @@
// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg6e8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, size_t vl) {
- return vlseg6e8_v_u8mf8(v0, v1, v2, v3, v4, v5, base, vl);
+void test_vlseg6e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t vl) {
+ return vlseg6e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8mf8(
+// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8mf8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -2962,9 +2962,9 @@
// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -2981,13 +2981,13 @@
// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg7e8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, size_t vl) {
- return vlseg7e8_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, vl);
+void test_vlseg7e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t vl) {
+ return vlseg7e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8mf8(
+// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8mf8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -3006,9 +3006,9 @@
// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -3027,35 +3027,35 @@
// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg8e8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, size_t vl) {
- return vlseg8e8_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, vl);
+void test_vlseg8e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t vl) {
+ return vlseg8e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8mf4(
+// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg2e8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, size_t vl) {
- return vlseg2e8_v_u8mf4(v0, v1, base, vl);
+void test_vlseg2e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t vl) {
+ return vlseg2e8_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8mf4(
+// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -3064,9 +3064,9 @@
// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -3075,13 +3075,13 @@
// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg3e8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, size_t vl) {
- return vlseg3e8_v_u8mf4(v0, v1, v2, base, vl);
+void test_vlseg3e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t vl) {
+ return vlseg3e8_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8mf4(
+// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -3092,9 +3092,9 @@
// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -3105,13 +3105,13 @@
// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg4e8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, size_t vl) {
- return vlseg4e8_v_u8mf4(v0, v1, v2, v3, base, vl);
+void test_vlseg4e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t vl) {
+ return vlseg4e8_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8mf4(
+// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -3124,9 +3124,9 @@
// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -3139,13 +3139,13 @@
// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg5e8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, size_t vl) {
- return vlseg5e8_v_u8mf4(v0, v1, v2, v3, v4, base, vl);
+void test_vlseg5e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t vl) {
+ return vlseg5e8_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8mf4(
+// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -3160,9 +3160,9 @@
// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -3177,13 +3177,13 @@
// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg6e8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, size_t vl) {
- return vlseg6e8_v_u8mf4(v0, v1, v2, v3, v4, v5, base, vl);
+void test_vlseg6e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t vl) {
+ return vlseg6e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8mf4(
+// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -3200,9 +3200,9 @@
// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -3219,13 +3219,13 @@
// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg7e8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, size_t vl) {
- return vlseg7e8_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, vl);
+void test_vlseg7e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t vl) {
+ return vlseg7e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8mf4(
+// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -3244,9 +3244,9 @@
// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -3265,35 +3265,35 @@
// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg8e8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, size_t vl) {
- return vlseg8e8_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, vl);
+void test_vlseg8e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t vl) {
+ return vlseg8e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8mf2(
+// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg2e8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, size_t vl) {
- return vlseg2e8_v_u8mf2(v0, v1, base, vl);
+void test_vlseg2e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t vl) {
+ return vlseg2e8_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8mf2(
+// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -3302,9 +3302,9 @@
// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -3313,13 +3313,13 @@
// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg3e8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, size_t vl) {
- return vlseg3e8_v_u8mf2(v0, v1, v2, base, vl);
+void test_vlseg3e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t vl) {
+ return vlseg3e8_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8mf2(
+// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -3330,9 +3330,9 @@
// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -3343,13 +3343,13 @@
// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg4e8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, size_t vl) {
- return vlseg4e8_v_u8mf2(v0, v1, v2, v3, base, vl);
+void test_vlseg4e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t vl) {
+ return vlseg4e8_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8mf2(
+// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -3362,9 +3362,9 @@
// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -3377,13 +3377,13 @@
// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg5e8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, size_t vl) {
- return vlseg5e8_v_u8mf2(v0, v1, v2, v3, v4, base, vl);
+void test_vlseg5e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t vl) {
+ return vlseg5e8_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8mf2(
+// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -3398,9 +3398,9 @@
// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -3415,13 +3415,13 @@
// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg6e8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, size_t vl) {
- return vlseg6e8_v_u8mf2(v0, v1, v2, v3, v4, v5, base, vl);
+void test_vlseg6e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t vl) {
+ return vlseg6e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8mf2(
+// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -3438,9 +3438,9 @@
// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -3457,13 +3457,13 @@
// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg7e8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, size_t vl) {
- return vlseg7e8_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, vl);
+void test_vlseg7e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t vl) {
+ return vlseg7e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8mf2(
+// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -3482,9 +3482,9 @@
// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -3503,35 +3503,35 @@
// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg8e8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, size_t vl) {
- return vlseg8e8_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl);
+void test_vlseg8e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t vl) {
+ return vlseg8e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8m1(
+// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m1(
+// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg2e8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, size_t vl) {
- return vlseg2e8_v_u8m1(v0, v1, base, vl);
+void test_vlseg2e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t vl) {
+ return vlseg2e8_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8m1(
+// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -3540,9 +3540,9 @@
// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m1(
+// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -3551,13 +3551,13 @@
// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg3e8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, size_t vl) {
- return vlseg3e8_v_u8m1(v0, v1, v2, base, vl);
+void test_vlseg3e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t vl) {
+ return vlseg3e8_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8m1(
+// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3568,9 +3568,9 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m1( +// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3581,13 +3581,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8m1(v0, v1, v2, v3, base, vl); +void test_vlseg4e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t vl) { + return vlseg4e8_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8m1( +// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3600,9 +3600,9 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8m1( +// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3615,13 +3615,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, size_t vl) { - return vlseg5e8_v_u8m1(v0, v1, v2, v3, v4, base, vl); +void test_vlseg5e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t 
maskedoff4, const uint8_t *base, size_t vl) { + return vlseg5e8_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8m1( +// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3636,9 +3636,9 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8m1( +// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3653,13 +3653,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, size_t vl) { - return vlseg6e8_v_u8m1(v0, v1, v2, v3, v4, v5, base, vl); +void test_vlseg6e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t vl) { + return vlseg6e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8m1( +// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3676,9 +3676,9 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8m1( +// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv8i8.i64(i8* 
[[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3695,13 +3695,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, size_t vl) { - return vlseg7e8_v_u8m1(v0, v1, v2, v3, v4, v5, v6, base, vl); +void test_vlseg7e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t vl) { + return vlseg7e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8m1( +// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3720,9 +3720,9 @@ // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8m1( +// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3741,35 +3741,35 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, size_t vl) { - return vlseg8e8_v_u8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +void test_vlseg8e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, 
vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t vl) { + return vlseg8e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8m2( +// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m2( +// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8m2(v0, v1, base, vl); +void test_vlseg2e8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t vl) { + return vlseg2e8_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8m2( +// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv16i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3778,9 +3778,9 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m2( +// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ 
-3789,13 +3789,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, size_t vl) { - return vlseg3e8_v_u8m2(v0, v1, v2, base, vl); +void test_vlseg3e8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t vl) { + return vlseg3e8_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8m2( +// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv16i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3806,9 +3806,9 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m2( +// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3819,57 +3819,57 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8m2(v0, v1, v2, v3, base, vl); +void test_vlseg4e8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t vl) { + return vlseg4e8_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8m4( +// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv32i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv32i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m4( +// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv32i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_u8m4 (vuint8m4_t *v0, vuint8m4_t *v1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8m4(v0, v1, base, vl); +void test_vlseg2e8_v_u8m4_m (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t vl) { + return vlseg2e8_v_u8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16mf4( +// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf4( +// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16mf4(v0, v1, base, vl); +void test_vlseg2e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t vl) { + return vlseg2e16_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e16_v_u16mf4( +// CHECK-RV32-LABEL: @test_vlseg3e16_v_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3878,9 +3878,9 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf4( +// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vlseg3.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3889,13 +3889,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, size_t vl) { - return vlseg3e16_v_u16mf4(v0, v1, v2, base, vl); +void test_vlseg3e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t vl) { + return vlseg3e16_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e16_v_u16mf4( +// CHECK-RV32-LABEL: @test_vlseg4e16_v_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3906,9 +3906,9 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf4( +// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3919,13 +3919,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, size_t vl) { - return vlseg4e16_v_u16mf4(v0, v1, v2, v3, base, vl); +void test_vlseg4e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t vl) { + return vlseg4e16_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e16_v_u16mf4( +// CHECK-RV32-LABEL: @test_vlseg5e16_v_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3938,9 +3938,9 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf4( +// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3953,13 +3953,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, size_t vl) { - return vlseg5e16_v_u16mf4(v0, v1, v2, v3, v4, base, vl); +void test_vlseg5e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t vl) { + return vlseg5e16_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e16_v_u16mf4( +// CHECK-RV32-LABEL: @test_vlseg6e16_v_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3974,9 +3974,9 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf4( +// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3991,13 +3991,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void 
test_vlseg6e16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, size_t vl) { - return vlseg6e16_v_u16mf4(v0, v1, v2, v3, v4, v5, base, vl); +void test_vlseg6e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t vl) { + return vlseg6e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e16_v_u16mf4( +// CHECK-RV32-LABEL: @test_vlseg7e16_v_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4014,9 +4014,9 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf4( +// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4033,13 +4033,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, size_t vl) { - return vlseg7e16_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, vl); +void test_vlseg7e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t vl) { + return vlseg7e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e16_v_u16mf4( +// CHECK-RV32-LABEL: @test_vlseg8e16_v_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vlseg8.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4058,9 +4058,9 @@ // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf4( +// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4079,35 +4079,35 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, size_t vl) { - return vlseg8e16_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +void test_vlseg8e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t vl) { + return vlseg8e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16mf2( +// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf2( +// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16mf2(v0, v1, base, vl); +void test_vlseg2e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t vl) { + return vlseg2e16_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e16_v_u16mf2( +// CHECK-RV32-LABEL: @test_vlseg3e16_v_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4116,9 +4116,9 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf2( +// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4127,13 +4127,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, size_t vl) { - return vlseg3e16_v_u16mf2(v0, v1, v2, base, vl); +void test_vlseg3e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t vl) { + return vlseg3e16_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e16_v_u16mf2( +// CHECK-RV32-LABEL: @test_vlseg4e16_v_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4144,9 +4144,9 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf2( +// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vlseg4.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4157,13 +4157,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, size_t vl) { - return vlseg4e16_v_u16mf2(v0, v1, v2, v3, base, vl); +void test_vlseg4e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t vl) { + return vlseg4e16_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e16_v_u16mf2( +// CHECK-RV32-LABEL: @test_vlseg5e16_v_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4176,9 +4176,9 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf2( +// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4191,13 +4191,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, size_t vl) { - return vlseg5e16_v_u16mf2(v0, v1, v2, v3, v4, base, vl); +void test_vlseg5e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t vl) { + return vlseg5e16_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e16_v_u16mf2( +// CHECK-RV32-LABEL: @test_vlseg6e16_v_u16mf2_m( // CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4212,9 +4212,9 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf2( +// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4229,13 +4229,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, size_t vl) { - return vlseg6e16_v_u16mf2(v0, v1, v2, v3, v4, v5, base, vl); +void test_vlseg6e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t vl) { + return vlseg6e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e16_v_u16mf2( +// CHECK-RV32-LABEL: @test_vlseg7e16_v_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4252,9 +4252,9 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf2( +// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4271,13 +4271,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, size_t vl) { - return vlseg7e16_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, vl); +void test_vlseg7e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t vl) { + return vlseg7e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e16_v_u16mf2( +// CHECK-RV32-LABEL: @test_vlseg8e16_v_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4296,9 +4296,9 @@ // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf2( +// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4317,35 +4317,35 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, size_t vl) { - return vlseg8e16_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +void test_vlseg8e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, 
vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t vl) { + return vlseg8e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16m1( +// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m1( +// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16m1(v0, v1, base, vl); +void test_vlseg2e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t vl) { + return vlseg2e16_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e16_v_u16m1( +// CHECK-RV32-LABEL: @test_vlseg3e16_v_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4354,9 +4354,9 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m1( +// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4365,13 +4365,13 @@ // 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, size_t vl) { - return vlseg3e16_v_u16m1(v0, v1, v2, base, vl); +void test_vlseg3e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t vl) { + return vlseg3e16_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e16_v_u16m1( +// CHECK-RV32-LABEL: @test_vlseg4e16_v_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4382,9 +4382,9 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m1( +// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4395,13 +4395,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, size_t vl) { - return vlseg4e16_v_u16m1(v0, v1, v2, v3, base, vl); +void test_vlseg4e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t vl) { + return vlseg4e16_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e16_v_u16m1( +// CHECK-RV32-LABEL: @test_vlseg5e16_v_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4414,9 +4414,9 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16m1( +// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4429,13 +4429,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, size_t vl) { - return vlseg5e16_v_u16m1(v0, v1, v2, v3, v4, base, vl); +void test_vlseg5e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t vl) { + return vlseg5e16_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e16_v_u16m1( +// CHECK-RV32-LABEL: @test_vlseg6e16_v_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4450,9 +4450,9 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16m1( +// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4467,13 +4467,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, size_t vl) { - return vlseg6e16_v_u16m1(v0, v1, v2, v3, v4, v5, base, vl); +void test_vlseg6e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t vl) { + 
return vlseg6e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e16_v_u16m1( +// CHECK-RV32-LABEL: @test_vlseg7e16_v_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4490,9 +4490,9 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16m1( +// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4509,13 +4509,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, size_t vl) { - return vlseg7e16_v_u16m1(v0, v1, v2, v3, v4, v5, v6, base, vl); +void test_vlseg7e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t vl) { + return vlseg7e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e16_v_u16m1( +// CHECK-RV32-LABEL: @test_vlseg8e16_v_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4534,9 +4534,9 @@ // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: 
@test_vlseg8e16_v_u16m1( +// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4555,35 +4555,35 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, size_t vl) { - return vlseg8e16_v_u16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +void test_vlseg8e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t vl) { + return vlseg8e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16m2( +// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m2( +// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16m2(v0, v1, base, vl); +void test_vlseg2e16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t vl) { + return vlseg2e16_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e16_v_u16m2( 
+// CHECK-RV32-LABEL: @test_vlseg3e16_v_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4592,9 +4592,9 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m2( +// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4603,13 +4603,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, size_t vl) { - return vlseg3e16_v_u16m2(v0, v1, v2, base, vl); +void test_vlseg3e16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t vl) { + return vlseg3e16_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e16_v_u16m2( +// CHECK-RV32-LABEL: @test_vlseg4e16_v_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4620,9 +4620,9 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m2( +// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4633,57 +4633,57 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, 
vuint16m2_t *v3, const uint16_t *base, size_t vl) { - return vlseg4e16_v_u16m2(v0, v1, v2, v3, base, vl); +void test_vlseg4e16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t vl) { + return vlseg4e16_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16m4( +// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m4( +// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_u16m4 (vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16m4(v0, v1, base, vl); +void test_vlseg2e16_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t vl) { + return vlseg2e16_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32mf2( +// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2( +// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 
// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32mf2(v0, v1, base, vl); +void test_vlseg2e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t vl) { + return vlseg2e32_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e32_v_u32mf2( +// CHECK-RV32-LABEL: @test_vlseg3e32_v_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4692,9 +4692,9 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32mf2( +// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4703,13 +4703,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, size_t vl) { - return vlseg3e32_v_u32mf2(v0, v1, v2, base, vl); +void test_vlseg3e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t vl) { + return vlseg3e32_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e32_v_u32mf2( +// CHECK-RV32-LABEL: @test_vlseg4e32_v_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4720,9 +4720,9 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32mf2( +// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vlseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4733,13 +4733,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, size_t vl) { - return vlseg4e32_v_u32mf2(v0, v1, v2, v3, base, vl); +void test_vlseg4e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t vl) { + return vlseg4e32_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e32_v_u32mf2( +// CHECK-RV32-LABEL: @test_vlseg5e32_v_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4752,9 +4752,9 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg5e32_v_u32mf2( +// CHECK-RV64-LABEL: @test_vlseg5e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4767,13 +4767,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, size_t vl) { - return vlseg5e32_v_u32mf2(v0, v1, v2, v3, v4, base, vl); +void test_vlseg5e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t vl) { + return vlseg5e32_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e32_v_u32mf2( +// CHECK-RV32-LABEL: @test_vlseg6e32_v_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4788,9 +4788,9 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg6e32_v_u32mf2( +// CHECK-RV64-LABEL: @test_vlseg6e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4805,13 +4805,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, size_t vl) { - return vlseg6e32_v_u32mf2(v0, v1, v2, v3, v4, v5, base, vl); +void test_vlseg6e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t vl) { + return vlseg6e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e32_v_u32mf2( +// CHECK-RV32-LABEL: @test_vlseg7e32_v_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4828,9 +4828,9 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg7e32_v_u32mf2( +// CHECK-RV64-LABEL: @test_vlseg7e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4847,13 +4847,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, size_t vl) { - return vlseg7e32_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, vl); +void test_vlseg7e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t vl) { + return vlseg7e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e32_v_u32mf2( +// CHECK-RV32-LABEL: @test_vlseg8e32_v_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4872,9 +4872,9 @@ // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg8e32_v_u32mf2( +// CHECK-RV64-LABEL: @test_vlseg8e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4893,35 +4893,35 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, size_t vl) { - return vlseg8e32_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +void test_vlseg8e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, 
vuint32mf2_t maskedoff7, const uint32_t *base, size_t vl) { + return vlseg8e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32m1( +// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m1( +// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32m1(v0, v1, base, vl); +void test_vlseg2e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t vl) { + return vlseg2e32_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e32_v_u32m1( +// CHECK-RV32-LABEL: @test_vlseg3e32_v_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4930,9 +4930,9 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m1( +// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4941,13 +4941,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_u32m1 (vuint32m1_t 
*v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, size_t vl) { - return vlseg3e32_v_u32m1(v0, v1, v2, base, vl); +void test_vlseg3e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t vl) { + return vlseg3e32_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e32_v_u32m1( +// CHECK-RV32-LABEL: @test_vlseg4e32_v_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4958,9 +4958,9 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m1( +// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4971,13 +4971,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, size_t vl) { - return vlseg4e32_v_u32m1(v0, v1, v2, v3, base, vl); +void test_vlseg4e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t vl) { + return vlseg4e32_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e32_v_u32m1( +// CHECK-RV32-LABEL: @test_vlseg5e32_v_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4990,9 +4990,9 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg5e32_v_u32m1( +// CHECK-RV64-LABEL: @test_vlseg5e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i32.i64(i32* [[BASE:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5005,13 +5005,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, size_t vl) { - return vlseg5e32_v_u32m1(v0, v1, v2, v3, v4, base, vl); +void test_vlseg5e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t vl) { + return vlseg5e32_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e32_v_u32m1( +// CHECK-RV32-LABEL: @test_vlseg6e32_v_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5026,9 +5026,9 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg6e32_v_u32m1( +// CHECK-RV64-LABEL: @test_vlseg6e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5043,13 +5043,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, size_t vl) { - return vlseg6e32_v_u32m1(v0, v1, v2, v3, v4, v5, base, vl); +void test_vlseg6e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t vl) { + return vlseg6e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, 
vl); } -// CHECK-RV32-LABEL: @test_vlseg7e32_v_u32m1( +// CHECK-RV32-LABEL: @test_vlseg7e32_v_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5066,9 +5066,9 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg7e32_v_u32m1( +// CHECK-RV64-LABEL: @test_vlseg7e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5085,13 +5085,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, size_t vl) { - return vlseg7e32_v_u32m1(v0, v1, v2, v3, v4, v5, v6, base, vl); +void test_vlseg7e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t vl) { + return vlseg7e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e32_v_u32m1( +// CHECK-RV32-LABEL: @test_vlseg8e32_v_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5110,9 +5110,9 @@ // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg8e32_v_u32m1( +// CHECK-RV64-LABEL: @test_vlseg8e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
{ , , , , , , , } @llvm.riscv.vlseg8.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5131,35 +5131,35 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, size_t vl) { - return vlseg8e32_v_u32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +void test_vlseg8e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t vl) { + return vlseg8e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32m2( +// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m2( +// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32m2(v0, v1, base, vl); +void test_vlseg2e32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t vl) { + return vlseg2e32_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e32_v_u32m2( +// CHECK-RV32-LABEL: @test_vlseg3e32_v_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vlseg3.nxv4i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5168,9 +5168,9 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m2( +// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5179,13 +5179,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, size_t vl) { - return vlseg3e32_v_u32m2(v0, v1, v2, base, vl); +void test_vlseg3e32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t vl) { + return vlseg3e32_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e32_v_u32m2( +// CHECK-RV32-LABEL: @test_vlseg4e32_v_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5196,9 +5196,9 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m2( +// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5209,57 +5209,57 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, size_t vl) { - return vlseg4e32_v_u32m2(v0, v1, v2, v3, base, vl); +void 
test_vlseg4e32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t vl) { + return vlseg4e32_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32m4( +// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m4( +// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_u32m4 (vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32m4(v0, v1, base, vl); +void test_vlseg2e32_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t vl) { + return vlseg2e32_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e64_v_u64m1( +// CHECK-RV32-LABEL: @test_vlseg2e64_v_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m1( +// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64_v_u64m1 
(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, size_t vl) { - return vlseg2e64_v_u64m1(v0, v1, base, vl); +void test_vlseg2e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t vl) { + return vlseg2e64_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e64_v_u64m1( +// CHECK-RV32-LABEL: @test_vlseg3e64_v_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5268,9 +5268,9 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m1( +// CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5279,13 +5279,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e64_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, size_t vl) { - return vlseg3e64_v_u64m1(v0, v1, v2, base, vl); +void test_vlseg3e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t vl) { + return vlseg3e64_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e64_v_u64m1( +// CHECK-RV32-LABEL: @test_vlseg4e64_v_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5296,9 +5296,9 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m1( +// CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5309,13 +5309,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, size_t vl) { - return vlseg4e64_v_u64m1(v0, v1, v2, v3, base, vl); +void test_vlseg4e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t vl) { + return vlseg4e64_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e64_v_u64m1( +// CHECK-RV32-LABEL: @test_vlseg5e64_v_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5328,9 +5328,9 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg5e64_v_u64m1( +// CHECK-RV64-LABEL: @test_vlseg5e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5343,13 +5343,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e64_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, size_t vl) { - return vlseg5e64_v_u64m1(v0, v1, v2, v3, v4, base, vl); +void test_vlseg5e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t vl) { + return vlseg5e64_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e64_v_u64m1( +// CHECK-RV32-LABEL: @test_vlseg6e64_v_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], 
i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5364,9 +5364,9 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg6e64_v_u64m1( +// CHECK-RV64-LABEL: @test_vlseg6e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5381,13 +5381,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e64_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, size_t vl) { - return vlseg6e64_v_u64m1(v0, v1, v2, v3, v4, v5, base, vl); +void test_vlseg6e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t vl) { + return vlseg6e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e64_v_u64m1( +// CHECK-RV32-LABEL: @test_vlseg7e64_v_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5404,9 +5404,9 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg7e64_v_u64m1( +// CHECK-RV64-LABEL: @test_vlseg7e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5423,13 +5423,13 @@ // 
CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e64_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, size_t vl) { - return vlseg7e64_v_u64m1(v0, v1, v2, v3, v4, v5, v6, base, vl); +void test_vlseg7e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t vl) { + return vlseg7e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e64_v_u64m1( +// CHECK-RV32-LABEL: @test_vlseg8e64_v_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5448,9 +5448,9 @@ // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg8e64_v_u64m1( +// CHECK-RV64-LABEL: @test_vlseg8e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5469,35 +5469,35 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e64_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, size_t vl) { - return vlseg8e64_v_u64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +void test_vlseg8e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t vl) { + return vlseg8e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e64_v_u64m2( +// 
CHECK-RV32-LABEL: @test_vlseg2e64_v_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m2( +// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, size_t vl) { - return vlseg2e64_v_u64m2(v0, v1, base, vl); +void test_vlseg2e64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t vl) { + return vlseg2e64_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e64_v_u64m2( +// CHECK-RV32-LABEL: @test_vlseg3e64_v_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5506,9 +5506,9 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m2( +// CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5517,13 +5517,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e64_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, size_t vl) { - return vlseg3e64_v_u64m2(v0, v1, v2, base, vl); +void test_vlseg3e64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t 
maskedoff2, const uint64_t *base, size_t vl) { + return vlseg3e64_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e64_v_u64m2( +// CHECK-RV32-LABEL: @test_vlseg4e64_v_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5534,9 +5534,9 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m2( +// CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5547,57 +5547,57 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, size_t vl) { - return vlseg4e64_v_u64m2(v0, v1, v2, v3, base, vl); +void test_vlseg4e64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t vl) { + return vlseg4e64_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e64_v_u64m4( +// CHECK-RV32-LABEL: @test_vlseg2e64_v_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m4( +// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64_v_u64m4 (vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, size_t vl) { - return vlseg2e64_v_u64m4(v0, v1, base, vl); +void test_vlseg2e64_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t vl) { + return vlseg2e64_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32mf2( +// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, size_t vl) { - return vlseg2e32_v_f32mf2(v0, v1, base, vl); +void test_vlseg2e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t vl) { + return vlseg2e32_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e32_v_f32mf2( +// CHECK-RV32-LABEL: @test_vlseg3e32_v_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5606,9 +5606,9 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5617,13 +5617,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, size_t vl) { - return vlseg3e32_v_f32mf2(v0, v1, v2, base, vl); +void test_vlseg3e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t vl) { + return vlseg3e32_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e32_v_f32mf2( +// CHECK-RV32-LABEL: @test_vlseg4e32_v_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5634,9 +5634,9 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5647,13 +5647,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, size_t vl) { - return vlseg4e32_v_f32mf2(v0, v1, v2, v3, base, vl); +void test_vlseg4e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t vl) { + return vlseg4e32_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e32_v_f32mf2( +// CHECK-RV32-LABEL: @test_vlseg5e32_v_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ 
-5666,9 +5666,9 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg5e32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vlseg5e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5681,13 +5681,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, size_t vl) { - return vlseg5e32_v_f32mf2(v0, v1, v2, v3, v4, base, vl); +void test_vlseg5e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t vl) { + return vlseg5e32_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e32_v_f32mf2( +// CHECK-RV32-LABEL: @test_vlseg6e32_v_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5702,9 +5702,9 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg6e32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vlseg6e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5719,13 +5719,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, size_t vl) { - return vlseg6e32_v_f32mf2(v0, v1, v2, v3, v4, v5, base, vl); +void test_vlseg6e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t 
*v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t vl) { + return vlseg6e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e32_v_f32mf2( +// CHECK-RV32-LABEL: @test_vlseg7e32_v_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5742,9 +5742,9 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg7e32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vlseg7e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5761,13 +5761,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, size_t vl) { - return vlseg7e32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, vl); +void test_vlseg7e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t vl) { + return vlseg7e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e32_v_f32mf2( +// CHECK-RV32-LABEL: @test_vlseg8e32_v_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // 
CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5786,9 +5786,9 @@ // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg8e32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vlseg8e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5807,35 +5807,35 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, size_t vl) { - return vlseg8e32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +void test_vlseg8e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t vl) { + return vlseg8e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32m1( +// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m1( +// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_f32m1 
(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, size_t vl) { - return vlseg2e32_v_f32m1(v0, v1, base, vl); +void test_vlseg2e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t vl) { + return vlseg2e32_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e32_v_f32m1( +// CHECK-RV32-LABEL: @test_vlseg3e32_v_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5844,9 +5844,9 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m1( +// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5855,13 +5855,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, size_t vl) { - return vlseg3e32_v_f32m1(v0, v1, v2, base, vl); +void test_vlseg3e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t vl) { + return vlseg3e32_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e32_v_f32m1( +// CHECK-RV32-LABEL: @test_vlseg4e32_v_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5872,9 +5872,9 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m1( +// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5885,13 +5885,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, size_t vl) { - return vlseg4e32_v_f32m1(v0, v1, v2, v3, base, vl); +void test_vlseg4e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t vl) { + return vlseg4e32_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e32_v_f32m1( +// CHECK-RV32-LABEL: @test_vlseg5e32_v_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5904,9 +5904,9 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg5e32_v_f32m1( +// CHECK-RV64-LABEL: @test_vlseg5e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5919,13 +5919,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, size_t vl) { - return vlseg5e32_v_f32m1(v0, v1, v2, v3, v4, base, vl); +void test_vlseg5e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t vl) { + return vlseg5e32_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e32_v_f32m1( +// CHECK-RV32-LABEL: @test_vlseg6e32_v_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5940,9 +5940,9 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg6e32_v_f32m1( +// CHECK-RV64-LABEL: @test_vlseg6e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5957,13 +5957,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, size_t vl) { - return vlseg6e32_v_f32m1(v0, v1, v2, v3, v4, v5, base, vl); +void test_vlseg6e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t vl) { + return vlseg6e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e32_v_f32m1( +// CHECK-RV32-LABEL: @test_vlseg7e32_v_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5980,9 +5980,9 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg7e32_v_f32m1( +// CHECK-RV64-LABEL: @test_vlseg7e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] 
= extractvalue { , , , , , , } [[TMP0]], 1 @@ -5999,13 +5999,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, size_t vl) { - return vlseg7e32_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, vl); +void test_vlseg7e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t vl) { + return vlseg7e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e32_v_f32m1( +// CHECK-RV32-LABEL: @test_vlseg8e32_v_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6024,9 +6024,9 @@ // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg8e32_v_f32m1( +// CHECK-RV64-LABEL: @test_vlseg8e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6045,35 +6045,35 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, size_t vl) { - return vlseg8e32_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +void test_vlseg8e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t vl) { + return vlseg8e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32m2( +// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m2( +// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, size_t vl) { - return vlseg2e32_v_f32m2(v0, v1, base, vl); +void test_vlseg2e32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t vl) { + return vlseg2e32_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e32_v_f32m2( +// CHECK-RV32-LABEL: @test_vlseg3e32_v_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6082,9 +6082,9 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m2( +// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6093,13 +6093,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, size_t vl) { - return vlseg3e32_v_f32m2(v0, v1, v2, base, vl); +void test_vlseg3e32_v_f32m2_m 
(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t vl) { + return vlseg3e32_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e32_v_f32m2( +// CHECK-RV32-LABEL: @test_vlseg4e32_v_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6110,9 +6110,9 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m2( +// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6123,57 +6123,57 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, size_t vl) { - return vlseg4e32_v_f32m2(v0, v1, v2, v3, base, vl); +void test_vlseg4e32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t vl) { + return vlseg4e32_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32m4( +// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m4( +// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e32_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, size_t vl) {
-  return vlseg2e32_v_f32m4(v0, v1, base, vl);
+void test_vlseg2e32_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t vl) {
+  return vlseg2e32_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg2e64_v_f64m1(
+// CHECK-RV32-LABEL: @test_vlseg2e64_v_f64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m1(
+// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, size_t vl) {
-  return vlseg2e64_v_f64m1(v0, v1, base, vl);
+void test_vlseg2e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t vl) {
+  return vlseg2e64_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg3e64_v_f64m1(
+// CHECK-RV32-LABEL: @test_vlseg3e64_v_f64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -6182,9 +6182,9 @@
 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m1(
+// CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -6193,13 +6193,13 @@
 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg3e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, size_t vl) {
-  return vlseg3e64_v_f64m1(v0, v1, v2, base, vl);
+void test_vlseg3e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t vl) {
+  return vlseg3e64_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg4e64_v_f64m1(
+// CHECK-RV32-LABEL: @test_vlseg4e64_v_f64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -6210,9 +6210,9 @@
 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m1(
+// CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -6223,13 +6223,13 @@
 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg4e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, size_t vl) {
-  return vlseg4e64_v_f64m1(v0, v1, v2, v3, base, vl);
+void test_vlseg4e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t vl) {
+  return vlseg4e64_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg5e64_v_f64m1(
+// CHECK-RV32-LABEL: @test_vlseg5e64_v_f64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -6242,9 +6242,9 @@
 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg5e64_v_f64m1(
+// CHECK-RV64-LABEL: @test_vlseg5e64_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -6257,13 +6257,13 @@
 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg5e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, size_t vl) {
-  return vlseg5e64_v_f64m1(v0, v1, v2, v3, v4, base, vl);
+void test_vlseg5e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t vl) {
+  return vlseg5e64_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg6e64_v_f64m1(
+// CHECK-RV32-LABEL: @test_vlseg6e64_v_f64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -6278,9 +6278,9 @@
 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg6e64_v_f64m1(
+// CHECK-RV64-LABEL: @test_vlseg6e64_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -6295,13 +6295,13 @@
 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg6e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, size_t vl) {
-  return vlseg6e64_v_f64m1(v0, v1, v2, v3, v4, v5, base, vl);
+void test_vlseg6e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t vl) {
+  return vlseg6e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg7e64_v_f64m1(
+// CHECK-RV32-LABEL: @test_vlseg7e64_v_f64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -6318,9 +6318,9 @@
 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg7e64_v_f64m1(
+// CHECK-RV64-LABEL: @test_vlseg7e64_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -6337,13 +6337,13 @@
 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg7e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, size_t vl) {
-  return vlseg7e64_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, vl);
+void test_vlseg7e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t vl) {
+  return vlseg7e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg8e64_v_f64m1(
+// CHECK-RV32-LABEL: @test_vlseg8e64_v_f64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -6362,9 +6362,9 @@
 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 8
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg8e64_v_f64m1(
+// CHECK-RV64-LABEL: @test_vlseg8e64_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -6383,35 +6383,35 @@
 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg8e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, size_t vl) {
-  return vlseg8e64_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl);
+void test_vlseg8e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t vl) {
+  return vlseg8e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg2e64_v_f64m2(
+// CHECK-RV32-LABEL: @test_vlseg2e64_v_f64m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m2(
+// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, size_t vl) {
-  return vlseg2e64_v_f64m2(v0, v1, base, vl);
+void test_vlseg2e64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t vl) {
+  return vlseg2e64_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg3e64_v_f64m2(
+// CHECK-RV32-LABEL: @test_vlseg3e64_v_f64m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -6420,9 +6420,9 @@
 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m2(
+// CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -6431,13 +6431,13 @@
 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg3e64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, size_t vl) {
-  return vlseg3e64_v_f64m2(v0, v1, v2, base, vl);
+void test_vlseg3e64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t vl) {
+  return vlseg3e64_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg4e64_v_f64m2(
+// CHECK-RV32-LABEL: @test_vlseg4e64_v_f64m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -6448,9 +6448,9 @@
 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m2(
+// CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -6461,7318 +6461,30 @@
 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg4e64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, size_t vl) {
-  return vlseg4e64_v_f64m2(v0, v1, v2, v3, base, vl);
+void test_vlseg4e64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t vl) {
+  return vlseg4e64_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg2e64_v_f64m4(
+// CHECK-RV32-LABEL: @test_vlseg2e64_v_f64m4_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m4(
+// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e64_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, size_t vl) {
-  return vlseg2e64_v_f64m4(v0, v1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8mf8_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t vl) {
-  return vlseg2e8_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8mf8_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t vl) {
-  return vlseg3e8_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8mf8_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t vl) {
-  return vlseg4e8_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8mf8_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg5e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t vl) {
-  return vlseg5e8_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8mf8_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg6e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t vl) {
-  return vlseg6e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8mf8_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg7e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t vl) {
-  return vlseg7e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8mf8_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg8e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t vl) {
-  return vlseg8e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t vl) {
-  return vlseg2e8_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t vl) {
-  return vlseg3e8_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t vl) {
-  return vlseg4e8_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg5e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t vl) {
-  return vlseg5e8_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg6e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t vl) {
-  return vlseg6e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg7e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t vl) {
-  return vlseg7e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg8e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t vl) {
-  return vlseg8e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t vl) {
-  return vlseg2e8_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
[[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , 
, , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t vl) { - return vlseg5e8_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: 
store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t vl) { - return vlseg6e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// 
CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t vl) { - return vlseg7e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t vl) { - return vlseg8e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t 
vl) { - return vlseg3e8_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t vl) { - return vlseg5e8_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, 
vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t vl) { - return vlseg6e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t vl) { - return vlseg7e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8m1_m( -// CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t vl) { - return vlseg8e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); -} - -// CHECK-RV32-LABEL: 
@test_vlseg2e8_v_i8m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv32i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e8_v_i8m4_m (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t vl) { - return vlseg3e16_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], 
align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t vl) { - return vlseg4e16_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e16_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t vl) { - return vlseg5e16_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e16_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* 
[[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t vl) { - return vlseg6e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e16_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// 
CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t vl) { - return vlseg7e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e16_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// 
CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t vl) { - return vlseg8e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t 
maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t vl) { - return vlseg3e16_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void 
test_vlseg4e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t vl) { - return vlseg4e16_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e16_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t vl) { - return vlseg5e16_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e16_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], 
align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t vl) { - return vlseg6e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e16_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t vl) { - return vlseg7e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e16_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t vl) { - return vlseg8e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = 
extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t vl) { - return vlseg3e16_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t vl) { - return vlseg4e16_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e16_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t vl) { - return vlseg5e16_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e16_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: 
@test_vlseg6e16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t vl) { - return vlseg6e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e16_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 
1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t vl) { - return vlseg7e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e16_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t vl) { - return vlseg8e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vlseg3.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t vl) { - return vlseg3e16_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t vl) { - return vlseg4e16_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m4_m( -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16> } @llvm.riscv.vlseg2.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF0:%.*]], <vscale x 16 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP1]], <vscale x 16 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg2e16_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t vl) {
-  return vlseg2e16_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg2.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg2.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg2e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t vl) {
-  return vlseg2e32_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e32_v_i32mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg3.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg3.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg3e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t
maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t vl) { - return vlseg3e32_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e32_v_i32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t vl) { - return vlseg4e32_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e32_v_i32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e32_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t vl) { - return vlseg5e32_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e32_v_i32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e32_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void 
test_vlseg6e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t vl) { - return vlseg6e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e32_v_i32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e32_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t vl) { - 
return vlseg7e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e32_v_i32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e32_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, 
vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t vl) {
-  return vlseg8e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlseg2.mask.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlseg2.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg2e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t vl) {
-  return vlseg2e32_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e32_v_i32m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlseg3.mask.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlseg3.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg3e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t vl) {
-  return vlseg3e32_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e32_v_i32m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlseg4.mask.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]],
[[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t vl) { - return vlseg4e32_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e32_v_i32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e32_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t vl) { - return vlseg5e32_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e32_v_i32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e32_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t vl) { - return vlseg6e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: 
@test_vlseg7e32_v_i32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e32_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t vl) { - return vlseg7e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e32_v_i32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e32_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg8e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t vl) {
- return vlseg8e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t vl) {
- return vlseg2e32_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e32_v_i32m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t vl) {
- return vlseg3e32_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e32_v_i32m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t vl) {
- return vlseg4e32_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32m4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e32_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t vl) {
- return vlseg2e32_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e64_v_i64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t vl) {
- return vlseg2e64_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e64_v_i64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t vl) {
- return vlseg3e64_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e64_v_i64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-//
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t vl) {
- return vlseg4e64_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e64_v_i64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e64_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg5e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t vl) {
- return vlseg5e64_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e64_v_i64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e64_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg6e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t vl) {
- return vlseg6e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e64_v_i64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e64_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg7e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t vl) {
- return vlseg7e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e64_v_i64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e64_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg8e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t vl) {
- return vlseg8e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e64_v_i64m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t vl) {
- return vlseg2e64_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e64_v_i64m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i32(
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t vl) {
- return vlseg3e64_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e64_v_i64m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t vl) {
- return vlseg4e64_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e64_v_i64m4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e64_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t vl) {
- return vlseg2e64_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8mf8_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t vl) {
- return vlseg2e8_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8mf8_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t vl) {
- return vlseg3e8_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8mf8_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t vl) {
- return vlseg4e8_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8mf8_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg5e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t vl) {
- return vlseg5e8_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8mf8_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]],
* [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t vl) { - return vlseg6e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// 
CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t vl) { - return vlseg7e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , 
, , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg8e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t vl) {
- return vlseg8e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t vl) {
- return vlseg2e8_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t vl) {
- return vlseg3e8_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t vl) {
- return vlseg4e8_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg5e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t vl) {
- return vlseg5e8_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg6e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t vl) {
- return vlseg6e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg7e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t vl) {
- return vlseg7e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg8e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t vl) {
- return vlseg8e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t vl) {
- return vlseg2e8_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t vl) {
- return vlseg3e8_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t vl) {
- return vlseg4e8_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg5e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t vl) {
- return vlseg5e8_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg6e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t vl) {
- return vlseg6e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg7e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t vl) {
- return vlseg7e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg8e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t vl) {
- return vlseg8e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t vl) {
- return vlseg2e8_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t vl) {
- return vlseg3e8_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t vl) {
- return vlseg4e8_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg5e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t vl) {
- return vlseg5e8_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg6e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t vl) {
- return vlseg6e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg7e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t vl) {
- return vlseg7e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg8e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t vl) {
- return vlseg8e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t vl) {
- return vlseg2e8_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t vl) {
- return vlseg3e8_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv32i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e8_v_u8m4_m (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e16_v_u16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t vl) { - return vlseg3e16_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e16_v_u16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t vl) { - return vlseg4e16_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); -} - -// 
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg5e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t vl) {
- return vlseg5e16_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e16_v_u16mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg6e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t vl) {
- return vlseg6e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e16_v_u16mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg7e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t vl) {
- return vlseg7e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e16_v_u16mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg8e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t vl) {
- return vlseg8e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t vl) {
- return vlseg2e16_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e16_v_u16mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t vl) {
- return vlseg3e16_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e16_v_u16mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t vl) {
- return vlseg4e16_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e16_v_u16mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg5e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t vl) {
- return vlseg5e16_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e16_v_u16mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg6e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t vl) {
- return vlseg6e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e16_v_u16mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg7e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t vl) {
- return vlseg7e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e16_v_u16mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg8e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t vl) {
- return vlseg8e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t vl) {
- return vlseg2e16_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e16_v_u16m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t vl) {
- return vlseg3e16_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e16_v_u16m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t vl) {
- return vlseg4e16_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e16_v_u16m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg5e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t vl) {
- return vlseg5e16_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e16_v_u16m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg6e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t vl) {
- return vlseg6e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e16_v_u16m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg7e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t vl) {
- return vlseg7e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e16_v_u16m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg8e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t vl) {
- return vlseg8e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t vl) {
- return vlseg2e16_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e16_v_u16m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t vl) {
- return vlseg3e16_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e16_v_u16m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t vl) {
- return vlseg4e16_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16m4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e16_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t vl) {
- return vlseg2e16_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t vl) {
- return vlseg2e32_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e32_v_u32mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t vl) {
- return vlseg3e32_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e32_v_u32mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t vl) {
- return vlseg4e32_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e32_v_u32mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
[[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t vl) { - return vlseg5e32_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e32_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t vl) { - return vlseg6e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e32_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { 
, , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t vl) { - return vlseg7e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e32_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t vl) { - return vlseg8e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = 
extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e32_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t vl) { - return vlseg3e32_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e32_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], 
align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t vl) { - return vlseg4e32_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e32_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e32_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const 
uint32_t *base, size_t vl) { - return vlseg5e32_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e32_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e32_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t vl) { - return vlseg6e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e32_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue 
{ , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e32_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t vl) { - return vlseg7e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e32_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , 
} [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e32_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t vl) { - return vlseg8e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e32_v_u32m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t vl) { - return vlseg3e32_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e32_v_u32m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t vl) { - return vlseg4e32_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e32_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e64_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t vl) { - return vlseg2e64_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e64_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t vl) { - return vlseg3e64_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e64_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t 
vl) { - return vlseg4e64_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e64_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e64_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t vl) { - return vlseg5e64_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e64_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: 
store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e64_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t vl) { - return vlseg6e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e64_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e64_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t vl) { - return vlseg7e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e64_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e64_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t vl) { - return vlseg8e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e64_v_u64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t vl) { - return vlseg2e64_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e64_v_u64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = 
extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t vl) { - return vlseg3e64_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e64_v_u64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t vl) { - return vlseg4e64_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e64_v_u64m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store 
[[TMP1]], <vscale x 4 x i64>* [[V0:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64>* [[V1:%.*]], align 8
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlseg2.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP1]], <vscale x 4 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg2e64_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t vl) {
-  return vlseg2e64_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlseg2.mask.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlseg2.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg2e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t vl) {
-  return vlseg2e32_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e32_v_f32mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlseg3.mask.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlseg3.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg3e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t vl) {
-  return vlseg3e32_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e32_v_f32mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlseg4.mask.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlseg4.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg4e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t vl) {
-  return vlseg4e32_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e32_v_f32mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlseg5.mask.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store <vscale x 1 x float> [[TMP5]],
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e32_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlseg5.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg5e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t vl) {
- return vlseg5e32_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e32_v_f32mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlseg6.mask.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], <vscale x 1 x float> [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV32-NEXT: store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV32-NEXT: store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e32_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlseg6.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], <vscale x 1 x float> [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg6e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t vl) {
- return vlseg6e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e32_v_f32mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlseg7.mask.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], <vscale x 1 x float> [[MASKEDOFF5:%.*]], <vscale x 1 x float> [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV32-NEXT: store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV32-NEXT: store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
-// CHECK-RV32-NEXT: store <vscale x 1 x float> [[TMP7]], <vscale x 1 x float>* [[V6:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e32_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlseg7.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], <vscale x 1 x float> [[MASKEDOFF5:%.*]], <vscale x 1 x float> [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP7]], <vscale x 1 x float>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg7e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t vl) {
- return vlseg7e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e32_v_f32mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlseg8.mask.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], <vscale x 1 x float> [[MASKEDOFF5:%.*]], <vscale x 1 x float> [[MASKEDOFF6:%.*]], <vscale x 1 x float> [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV32-NEXT: store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV32-NEXT: store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
-// CHECK-RV32-NEXT: store <vscale x 1 x float> [[TMP7]], <vscale x 1 x float>* [[V6:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 7
-// CHECK-RV32-NEXT: store <vscale x 1 x float> [[TMP8]], <vscale x 1 x float>* [[V7:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e32_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlseg8.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], <vscale x 1 x float> [[MASKEDOFF5:%.*]], <vscale x 1 x float> [[MASKEDOFF6:%.*]], <vscale x 1 x float> [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP7]], <vscale x 1 x float>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP8]], <vscale x 1 x float>* [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg8e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t vl) {
- return vlseg8e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlseg2.mask.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlseg2.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t vl) {
- return vlseg2e32_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e32_v_f32m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlseg3.mask.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlseg3.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t vl) {
- return vlseg3e32_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e32_v_f32m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlseg4.mask.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlseg4.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t vl) {
- return vlseg4e32_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e32_v_f32m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlseg5.mask.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], <vscale x 2 x float> [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
-// CHECK-RV32-NEXT: store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e32_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlseg5.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], <vscale x 2 x float> [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg5e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t vl) {
- return vlseg5e32_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e32_v_f32m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlseg6.mask.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], <vscale x 2 x float> [[MASKEDOFF4:%.*]], <vscale x 2 x float> [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
-// CHECK-RV32-NEXT: store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
-// CHECK-RV32-NEXT: store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e32_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlseg6.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], <vscale x 2 x float> [[MASKEDOFF4:%.*]], <vscale x 2 x float> [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg6e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t vl) {
- return vlseg6e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e32_v_f32m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlseg7.mask.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], <vscale x 2 x float> [[MASKEDOFF4:%.*]], <vscale x 2 x float> [[MASKEDOFF5:%.*]], <vscale x 2 x float> [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
-// CHECK-RV32-NEXT: store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
-// CHECK-RV32-NEXT: store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 6
-// CHECK-RV32-NEXT: store <vscale x 2 x float> [[TMP7]], <vscale x 2 x float>* [[V6:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e32_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlseg7.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], <vscale x 2 x float> [[MASKEDOFF4:%.*]], <vscale x 2 x float> [[MASKEDOFF5:%.*]], <vscale x 2 x float> [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP7]], <vscale x 2 x float>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg7e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t vl) {
- return vlseg7e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e32_v_f32m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlseg8.mask.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], <vscale x 2 x float> [[MASKEDOFF4:%.*]], <vscale x 2 x float> [[MASKEDOFF5:%.*]], <vscale x 2 x float> [[MASKEDOFF6:%.*]], <vscale x 2 x float> [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
-// CHECK-RV32-NEXT: store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
-// CHECK-RV32-NEXT: store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 6
-// CHECK-RV32-NEXT: store <vscale x 2 x float> [[TMP7]], <vscale x 2 x float>* [[V6:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 7
-// CHECK-RV32-NEXT: store <vscale x 2 x float> [[TMP8]], <vscale x 2 x float>* [[V7:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e32_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlseg8.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], <vscale x 2 x float> [[MASKEDOFF4:%.*]], <vscale x 2 x float> [[MASKEDOFF5:%.*]], <vscale x 2 x float> [[MASKEDOFF6:%.*]], <vscale x 2 x float> [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP7]], <vscale x 2 x float>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP8]], <vscale x 2 x float>* [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg8e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t vl) {
- return vlseg8e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vlseg2.mask.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vlseg2.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t vl) {
- return vlseg2e32_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e32_v_f32m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vlseg3.mask.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], <vscale x 4 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 4 x float> [[TMP3]], <vscale x 4 x float>* [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vlseg3.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], <vscale x 4 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP3]], <vscale x 4 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t vl) {
- return vlseg3e32_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e32_v_f32m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vlseg4.mask.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], <vscale x 4 x float> [[MASKEDOFF2:%.*]], <vscale x 4 x float> [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 4 x float> [[TMP3]], <vscale x 4 x float>* [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 4 x float> [[TMP4]], <vscale x 4 x float>* [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vlseg4.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], <vscale x 4 x float> [[MASKEDOFF2:%.*]], <vscale x 4 x float> [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP3]], <vscale x 4 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP4]], <vscale x 4 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t vl) {
- return vlseg4e32_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32m4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x float>, <vscale x 8 x float> } @llvm.riscv.vlseg2.mask.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF0:%.*]], <vscale x 8 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 8 x float> [[TMP1]], <vscale x 8 x float>* [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 8 x float> [[TMP2]], <vscale x 8 x float>* [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x float>, <vscale x 8 x float> } @llvm.riscv.vlseg2.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF0:%.*]], <vscale x 8 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x float> [[TMP1]], <vscale x 8 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x float> [[TMP2]], <vscale x 8 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e32_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t vl) {
- return vlseg2e32_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e64_v_f64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlseg2.mask.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlseg2.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t vl) {
- return vlseg2e64_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e64_v_f64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlseg3.mask.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlseg3.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t vl) {
- return vlseg3e64_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e64_v_f64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlseg4.mask.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlseg4.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t vl) {
- return vlseg4e64_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e64_v_f64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlseg5.mask.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e64_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlseg5.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg5e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t vl) {
- return vlseg5e64_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e64_v_f64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlseg6.mask.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], <vscale x 1 x double> [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e64_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlseg6.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], <vscale x 1 x double> [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg6e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t vl) {
- return vlseg6e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e64_v_f64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlseg7.mask.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], <vscale x 1 x double> [[MASKEDOFF5:%.*]], <vscale x 1 x double> [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 6
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP7]], <vscale x 1 x double>* [[V6:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e64_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlseg7.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], <vscale x 1 x double> [[MASKEDOFF5:%.*]], <vscale x 1 x double> [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP7]], <vscale x 1 x double>* [[V6:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg7e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t vl) {
- return vlseg7e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e64_v_f64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlseg8.mask.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], <vscale x 1 x double> [[MASKEDOFF5:%.*]], <vscale x 1 x double> [[MASKEDOFF6:%.*]], <vscale x 1 x double> [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 6
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP7]], <vscale x 1 x double>* [[V6:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 7
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP8]], <vscale x 1 x double>* [[V7:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e64_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlseg8.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], <vscale x 1 x double> [[MASKEDOFF5:%.*]], <vscale x 1 x double> [[MASKEDOFF6:%.*]], <vscale x 1 x double> [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP7]], <vscale x 1 x double>* [[V6:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP8]], <vscale x 1 x double>* [[V7:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg8e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t vl) {
- return vlseg8e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e64_v_f64m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlseg2.mask.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlseg2.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t vl) {
- return vlseg2e64_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e64_v_f64m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlseg3.mask.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], <vscale x 2 x double> [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlseg3.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], <vscale x 2 x double> [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t vl) {
- return vlseg3e64_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e64_v_f64m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlseg4.mask.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], <vscale x 2 x double> [[MASKEDOFF2:%.*]], <vscale x 2 x double> [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 2 x double> [[TMP4]], <vscale x 2 x double>* [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlseg4.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], <vscale x 2 x double> [[MASKEDOFF2:%.*]], <vscale x 2 x double> [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP4]], <vscale x 2 x double>* [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t vl) {
- return vlseg4e64_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e64_v_f64m4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vlseg2.mask.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF0:%.*]], <vscale x 4 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 4 x double> [[TMP1]], <vscale x 4 x double>* [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 4 x double> [[TMP2]], <vscale x 4 x double>* [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vlseg2.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF0:%.*]], <vscale x 4 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP1]], <vscale x 4 x double>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP2]], <vscale x 4 x double>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e64_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t vl) {
- return vlseg2e64_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e16_v_f16mf4(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg2.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg2.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, size_t vl) {
- return vlseg2e16_v_f16mf4(v0, v1, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e16_v_f16mf4(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg3.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg3.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, size_t vl) {
- return vlseg3e16_v_f16mf4(v0, v1, v2, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e16_v_f16mf4(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg4.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg4.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, size_t vl) {
- return vlseg4e16_v_f16mf4(v0, v1, v2, v3, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e16_v_f16mf4(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg5.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] =
extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, size_t vl) { - return vlseg5e16_v_f16mf4(v0, v1, v2, v3, v4, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e16_v_f16mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t 
*v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, size_t vl) { - return vlseg6e16_v_f16mf4(v0, v1, v2, v3, v4, v5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e16_v_f16mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, size_t vl) { - return vlseg7e16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e16_v_f16mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// 
CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, size_t vl) { - return vlseg8e16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e16_v_f16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16mf2(v0, v1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e16_v_f16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = 
extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16mf2(v0, v1, v2, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e16_v_f16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16mf2(v0, v1, v2, v3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e16_v_f16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: 
store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, size_t vl) { - return vlseg5e16_v_f16mf2(v0, v1, v2, v3, v4, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e16_v_f16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 
*base, size_t vl) { - return vlseg6e16_v_f16mf2(v0, v1, v2, v3, v4, v5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e16_v_f16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, size_t vl) { - return vlseg7e16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e16_v_f16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, size_t vl) { - return vlseg8e16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e16_v_f16m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16m1(v0, v1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e16_v_f16m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16m1(v0, v1, v2, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e16_v_f16m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16m1(v0, v1, v2, v3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e16_v_f16m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue 
{ , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, size_t vl) { - return vlseg5e16_v_f16m1(v0, v1, v2, v3, v4, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e16_v_f16m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, size_t vl) { - return vlseg6e16_v_f16m1(v0, v1, v2, v3, v4, v5, base, vl); -} - -// 
CHECK-RV32-LABEL: @test_vlseg7e16_v_f16m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, size_t vl) { - return vlseg7e16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e16_v_f16m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, size_t vl) { - return vlseg8e16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e16_v_f16m2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16m2(v0, v1, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e16_v_f16m2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16m2(v0, v1, v2, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e16_v_f16m2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16m2(v0, v1, v2, v3, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e16_v_f16m4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e16_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16m4(v0, v1, base, vl); +void test_vlseg2e64_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t vl) { + return vlseg2e64_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV32-LABEL: @test_vlseg2e16_v_f16mf4_m( diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff.c @@ -7291,8201 +7291,9 @@ return vlseg2e64ff_v_f64m4(v0, v1, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_i8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] 
= call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_i8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , 
-// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg6e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_i8mf8_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg7e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_i8mf8_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], <vscale x 1 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8>* [[V7:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 8
-// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], <vscale x 1 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8>* [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 8
-// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg8e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_i8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]],
<vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg5e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_i8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg6e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_i8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], <vscale x 2 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], <vscale x 2 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg7e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_i8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], <vscale x 2 x i8> [[MASKEDOFF6:%.*]], <vscale x 2 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8>* [[V7:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 8
-// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], <vscale x 2 x i8> [[MASKEDOFF6:%.*]], <vscale x 2 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8>* [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 8
-// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg8e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_i8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg5e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_i8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg6e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_i8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg7e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_i8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], <vscale x 4 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8>* [[V7:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 8
-// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], <vscale x 4 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8>* [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 8
-// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg8e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_i8m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg5e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_i8m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8>* [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg6e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_i8m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg7e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_i8m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8
-// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8
-// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg8e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e8ff_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_v_i8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e8ff_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_v_i8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e8ff_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_v_i8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8m4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e8ff_v_i8m4_m (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_v_i8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_i16mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_i16mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_i16mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg5e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_i16mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg6e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_i16mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg7e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_i16mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8
-// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8
-// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg8e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_i16mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_i16mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_i16mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg5e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_i16mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg6e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_i16mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg7e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_i16mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8
-// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8
-// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg8e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_i16m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m1_m(
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// 
CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// 
CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = 
extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e16ff_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_i16m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e16ff_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_i16m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e16ff_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: 
store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e16ff_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_i32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_i32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_i32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_i32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , 
, , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_i32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_i32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], 
align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_i32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store 
[[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_i32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 -// 
CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_i32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_i32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_i32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_i32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store 
<vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg6e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e32ff_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_i32m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], <vscale x 2 x i32> [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store <vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32>* [[V6:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], <vscale x 2 x i32> [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg7e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e32ff_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_i32m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], <vscale x 2 x i32> [[MASKEDOFF6:%.*]], <vscale x 2 x i32> [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store <vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32>* [[V6:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store <vscale x 2 x i32> [[TMP8]], <vscale x 2 x i32>* [[V7:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32 } [[TMP0]], 8
-// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], <vscale x 2 x i32> [[MASKEDOFF6:%.*]], <vscale x 2 x i32> [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP8]], <vscale x 2 x i32>* [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 8
-// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg8e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e32ff_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_i32m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, i32 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e32ff_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_i32m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]],
i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e32ff_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_i32m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: 
store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e32ff_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_i32m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e32ff_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_i64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_i64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_i64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 
4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e64ff_v_i64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg5e64ff_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e64ff_v_i64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 -// 
CHECK-RV64-NEXT: ret void -// -void test_vlseg6e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg6e64ff_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e64ff_v_i64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: 
store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg7e64ff_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e64ff_v_i64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , 
i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg8e64ff_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_i64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e64ff_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_i64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e64ff_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_i64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e64ff_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t 
maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_i64m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e64ff_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return 
vlseg2e8ff_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } 
[[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_u8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, 
vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_u8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_u8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { - return 
vlseg7e8ff_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_u8mf8_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8
-// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8
-// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg8e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_u8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg5e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_u8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg6e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_u8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg7e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_u8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8
-// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8
-// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg8e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_u8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg5e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_u8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg6e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_u8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg7e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_u8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8
-// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8
-// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg8e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_u8m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg5e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_u8m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg6e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_u8m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg7e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_u8m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8
-// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8
-// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg8e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e8ff_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e8ff_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e8ff_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8m4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT:
[[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8>, i64 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg2e8ff_v_u8m4_m (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg2e8ff_v_u8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16mf4_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg2e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg2e16ff_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_u16mf4_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] =
extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_u16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret 
void -// -void test_vlseg4e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_u16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_u16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_u16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// 
CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_u16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) { - 
return vlseg8e16ff_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg2e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg2e16ff_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_u16mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg3e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg3e16ff_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_u16mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg4e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg4e16ff_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_u16mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>
, , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_u16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 
2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_u16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf2_m( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_u16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , 
i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg2e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg2e16ff_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_u16m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg3e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg3e16ff_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_u16m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store
[[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_u16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: 
store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_u16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = 
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg6e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_u16m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg7e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_u16m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8
-// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8
-// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg8e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e16ff_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_u16m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e16ff_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_u16m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e16ff_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16m4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e16ff_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_u32mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_u32mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-// -// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e32ff_v_u32mf2_m 
(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* 
[[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , 
, , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, 
vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// 
CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 4
-//
-// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32m1_m(
-//
-void test_vlseg8e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return vlseg8e32ff_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32m2_m(
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m2_m(
-//
-void test_vlseg2e32ff_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return vlseg2e32ff_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_u32m2_m(
-//
-// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m2_m(
-//
-void test_vlseg3e32ff_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return vlseg3e32ff_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_u32m2_m(
-//
-// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m2_m(
-//
-void test_vlseg4e32ff_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return vlseg4e32ff_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32m4_m(
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m4_m(
-//
-void test_vlseg2e32ff_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return vlseg2e32ff_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_u64m1_m(
-//
-// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m1_m(
-//
-void test_vlseg2e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return vlseg2e64ff_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_u64m1_m(
-//
-// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m1_m(
-//
-void test_vlseg3e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return vlseg3e64ff_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_u64m1_m(
-//
-// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m1_m(
-//
-void test_vlseg4e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return vlseg4e64ff_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e64ff_v_u64m1_m(
-//
-// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_u64m1_m(
-//
-void test_vlseg5e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return vlseg5e64ff_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e64ff_v_u64m1_m(
-//
-// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_u64m1_m(
-//
-void test_vlseg6e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return vlseg6e64ff_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e64ff_v_u64m1_m(
-//
-// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_u64m1_m(
-//
-void test_vlseg7e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return vlseg7e64ff_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e64ff_v_u64m1_m(
-//
-// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_u64m1_m(
-//
-void test_vlseg8e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return vlseg8e64ff_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_u64m2_m(
-//
-// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m2_m(
-//
-void test_vlseg2e64ff_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return vlseg2e64ff_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_u64m2_m(
-//
-// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m2_m(
-//
-void test_vlseg3e64ff_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return vlseg3e64ff_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_u64m2_m(
-//
-// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m2_m(
-//
-void test_vlseg4e64ff_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return vlseg4e64ff_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_u64m4_m(
-//
-// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m4_m(
-//
-void test_vlseg2e64ff_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return vlseg2e64ff_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_f32mf2_m(
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32mf2_m(
-//
-void test_vlseg2e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t *new_vl, size_t vl) {
-  return vlseg2e32ff_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_f32mf2_m(
-//
-// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32mf2_m(
-//
-void test_vlseg3e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t *new_vl, size_t vl) {
-  return vlseg3e32ff_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_f32mf2_m(
-//
-// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32mf2_m(
-//
-void test_vlseg4e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t *new_vl, size_t vl) {
-  return vlseg4e32ff_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_f32mf2_m(
-//
-// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32mf2_m(
-//
-void test_vlseg5e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t *new_vl, size_t vl) {
-  return vlseg5e32ff_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_f32mf2_m(
-//
-// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32mf2_m(
-//
-void test_vlseg6e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t *new_vl, size_t vl) {
-  return vlseg6e32ff_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_f32mf2_m(
-//
-// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32mf2_m(
-//
-void test_vlseg7e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t *new_vl, size_t vl) {
-  return vlseg7e32ff_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_f32mf2_m(
-//
-// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32mf2_m(
-//
-void test_vlseg8e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t *new_vl, size_t vl) {
-  return vlseg8e32ff_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_f32m1_m(
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m1_m(
-//
-void test_vlseg2e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t *new_vl, size_t vl) {
-  return vlseg2e32ff_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_f32m1_m(
-//
-// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m1_m(
-//
-void test_vlseg3e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t *new_vl, size_t vl) {
-  return vlseg3e32ff_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_f32m1_m(
-//
-// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m1_m(
-//
-void test_vlseg4e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t *new_vl, size_t vl) {
-  return vlseg4e32ff_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_f32m1_m(
-//
-// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32m1_m(
-//
-void test_vlseg5e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t *new_vl, size_t vl) {
-  return vlseg5e32ff_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_f32m1_m(
-//
-// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32m1_m(
-//
-void test_vlseg6e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t *new_vl, size_t vl) {
-  return vlseg6e32ff_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_f32m1_m(
-//
-// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32m1_m(
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , i64 }
[[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_f32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_f32m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e32ff_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t 
*new_vl, size_t vl) { - return vlseg2e32ff_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_f32m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e32ff_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_f32m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e32ff_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_f32m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e32ff_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_f64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: 
@test_vlseg2e64ff_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_f64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_f64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , 
, , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e64ff_v_f64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t *new_vl, size_t vl) { - return vlseg5e64ff_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e64ff_v_f64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: 
store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t *new_vl, size_t vl) { - return vlseg6e64ff_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e64ff_v_f64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t *new_vl, size_t vl) { - return vlseg7e64ff_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e64ff_v_f64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t *new_vl, size_t vl) { - return vlseg8e64ff_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_f64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e64ff_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_f64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e64ff_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_f64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: 
store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP4]], <vscale x 2 x double>* [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e64ff_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t *new_vl, size_t vl) {
- return vlseg4e64ff_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_f64m4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double>, i32 } @llvm.riscv.vlseg2ff.mask.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF0:%.*]], <vscale x 4 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 4 x double> [[TMP1]], <vscale x 4 x double>* [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 4 x double> [[TMP2]], <vscale x 4 x double>* [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double>, i64 } @llvm.riscv.vlseg2ff.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF0:%.*]], <vscale x 4 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP1]], <vscale x 4 x double>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP2]], <vscale x 4 x double>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e64ff_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
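// Editor's note (not part of the deleted test content): a minimal usage
// sketch of the fault-only-first segment loads exercised above. The function
// and buffer names are hypothetical; it assumes the v0.10-era RVV intrinsics
// from <riscv_vector.h> that this test targets.
#include <riscv_vector.h>
#include <stddef.h>

// Deinterleave {x, y} pairs of doubles. vlseg2e64ff may stop early at a
// faulting element and writes the element count actually loaded through
// new_vl, so the loop advances by new_vl rather than the requested vl.
void deinterleave_xy(const double *pairs, double *xs, double *ys, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e64m1(n - i);   // request up to n - i elements
    vfloat64m1_t x, y;
    size_t new_vl;
    vlseg2e64ff_v_f64m1(&x, &y, pairs + 2 * i, &new_vl, vl);
    vse64_v_f64m1(xs + i, x, new_vl);  // store only what was really loaded
    vse64_v_f64m1(ys + i, y, new_vl);
    i += new_vl;                       // resume after a partial load
  }
}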
-// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_f16mf4(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, i32 } @llvm.riscv.vlseg2ff.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, i64 } @llvm.riscv.vlseg2ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_v_f16mf4(v0, v1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_f16mf4(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } @llvm.riscv.vlseg3ff.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } @llvm.riscv.vlseg3ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_v_f16mf4(v0, v1, v2, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_f16mf4(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } @llvm.riscv.vlseg4ff.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } @llvm.riscv.vlseg4ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
-//
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_f16mf4(v0, v1, v2, v3, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_f16mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_f16mf4(v0, v1, v2, v3, v4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_f16mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_f16mf4(v0, v1, v2, v3, v4, v5, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_f16mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } 
[[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_f16mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf4( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_f16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16mf2(v0, v1, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_f16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } 
[[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_f16mf2(v0, v1, v2, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_f16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, size_t *new_vl, size_t vl) { - return 
vlseg4e16ff_v_f16mf2(v0, v1, v2, v3, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_f16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_f16mf2(v0, v1, v2, v3, v4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_f16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] 
= extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_f16mf2(v0, v1, v2, v3, v4, v5, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_f16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_f16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_f16m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16m1(v0, v1, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_f16m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_f16m1(v0, v1, v2, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_f16m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_f16m1(v0, v1, v2, v3, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_f16m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store 
[[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_f16m1(v0, v1, v2, v3, v4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_f16m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_f16m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_f16m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// 
CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_f16m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e16ff_v_f16m1 (vfloat16m1_t *v0, 
vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_f16m2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv8f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv8f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e16ff_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16m2(v0, v1, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_f16m2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv8f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv8f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e16ff_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_f16m2(v0, v1, v2, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_f16m2( -// 
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv8f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv8f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e16ff_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_f16m2(v0, v1, v2, v3, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_f16m4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv16f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv16f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e16ff_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16m4(v0, v1, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_f16mf4_m( +// 
CHECK-RV32-LABEL: @test_vlseg2e16ff_v_f16mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, i32 } @llvm.riscv.vlseg2ff.mask.nxv1f16.i32(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, i32 } @llvm.riscv.vlseg2ff.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 0
 // CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
 // CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 1
@@ -15494,9 +7302,9 @@
 // CHECK-RV32-NEXT:    store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
 // CHECK-RV32-NEXT:    ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, i64 } @llvm.riscv.vlseg2ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 1
@@ -15505,13 +7313,13 @@
 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg2e16ff_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) {
-  return vlseg2e16ff_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+void test_vlseg2e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg2e16ff_v_f16mf4(v0, v1, base, new_vl, vl);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_f16mf4_m(
+// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_f16mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } @llvm.riscv.vlseg3ff.mask.nxv1f16.i32(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } @llvm.riscv.vlseg3ff.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 0
 // CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
 // CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 1
@@ -15522,9 +7330,9 @@
 // CHECK-RV32-NEXT:    store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2
 // CHECK-RV32-NEXT:    ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } @llvm.riscv.vlseg3ff.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } @llvm.riscv.vlseg3ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 1
@@ -15535,13 +7343,13 @@
 // CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void 
test_vlseg3e16ff_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_f16mf4(v0, v1, v2, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_f16mf4_m( +// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_f16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -15554,9 +7362,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -15569,13 +7377,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16ff_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_f16mf4(v0, v1, v2, v3, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_f16mf4_m( +// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_f16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -15590,9 
+7398,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -15607,13 +7415,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16ff_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff_v_f16mf4(v0, v1, v2, v3, v4, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_f16mf4_m( +// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_f16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -15630,9 +7438,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -15649,13 +7457,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16ff_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, 
vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff_v_f16mf4(v0, v1, v2, v3, v4, v5, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_f16mf4_m( +// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_f16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -15674,9 +7482,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -15695,13 +7503,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16ff_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_f16mf4_m( +// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_f16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { 
, , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -15722,9 +7530,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -15745,13 +7553,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16ff_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_f16mf2_m( +// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_f16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -15760,9 +7568,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf2( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -15771,13 +7579,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16ff_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_f16mf2(v0, v1, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_f16mf2_m( +// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_f16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -15788,9 +7596,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -15801,13 +7609,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16ff_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_f16mf2(v0, v1, v2, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_f16mf2_m( +// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_f16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* 
[[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -15820,9 +7628,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -15835,13 +7643,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16ff_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_f16mf2(v0, v1, v2, v3, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_f16mf2_m( +// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_f16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -15856,9 +7664,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -15873,13 
+7681,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16ff_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff_v_f16mf2(v0, v1, v2, v3, v4, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_f16mf2_m( +// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_f16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -15896,9 +7704,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -15915,13 +7723,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16ff_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff_v_f16mf2(v0, v1, v2, v3, v4, v5, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_f16mf2_m( +// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_f16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { 
, , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -15940,9 +7748,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -15961,13 +7769,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16ff_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_f16mf2_m( +// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_f16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -15988,9 +7796,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf2_m( +// CHECK-RV64-LABEL: 
@test_vlseg8e16ff_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -16011,13 +7819,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16ff_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_f16m1_m( +// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_f16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -16026,9 +7834,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -16037,13 +7845,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16ff_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { - return 
vlseg2e16ff_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_f16m1(v0, v1, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_f16m1_m( +// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_f16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -16054,9 +7862,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -16067,13 +7875,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16ff_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_f16m1(v0, v1, v2, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_f16m1_m( +// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_f16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -16086,9 +7894,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -16101,13 +7909,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16ff_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_f16m1(v0, v1, v2, v3, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_f16m1_m( +// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_f16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv4f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -16122,9 +7930,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -16139,13 +7947,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16ff_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff_v_f16m1(v0, v1, v2, v3, v4, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_f16m1_m( +// CHECK-RV32-LABEL: 
@test_vlseg6e16ff_v_f16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv4f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -16162,9 +7970,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -16181,13 +7989,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16ff_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff_v_f16m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_f16m1_m( +// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_f16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv4f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -16206,9 +8014,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } 
@llvm.riscv.vlseg7ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -16227,13 +8035,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16ff_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_f16m1_m( +// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_f16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv4f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -16254,9 +8062,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -16277,13 +8085,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16ff_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, 
vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_f16m2_m( +// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_f16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv8f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -16292,9 +8100,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv8f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -16303,13 +8111,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16ff_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e16ff_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_f16m2(v0, v1, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_f16m2_m( +// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_f16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv8f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv8f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -16320,9 +8128,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP4]], i32* 
[[NEW_VL:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv8f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -16333,13 +8141,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16ff_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e16ff_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_f16m2(v0, v1, v2, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_f16m2_m( +// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_f16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv8f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv8f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -16352,9 +8160,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv8f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -16367,13 +8175,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16ff_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e16ff_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, size_t 
*new_vl, size_t vl) {
+ return vlseg4e16ff_v_f16m2(v0, v1, v2, v3, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_f16m4_m(
+// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_f16m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv16f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv16f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
@@ -16382,9 +8190,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m4_m(
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv16f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
@@ -16393,6 +8201,6 @@
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg2e16ff_v_f16m4_m (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+void test_vlseg2e16ff_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, size_t *new_vl, size_t vl) {
+ return vlseg2e16ff_v_f16m4(v0, v1, base, new_vl, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff_mask.c
copy from clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff.c
copy to clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff_mask.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff_mask.c
@@ -13,9 +13,9 @@
#include
-// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8mf8(
+// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8mf8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
@@ -24,9 +24,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
@@ -35,13 +35,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg2e8ff_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_v_i8mf8(v0, v1, base, new_vl, vl);
+void test_vlseg2e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) {
+ return vlseg2e8ff_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8mf8(
+// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8mf8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
@@ -52,9 +52,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
@@ -65,13 +65,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg3e8ff_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_v_i8mf8(v0, v1, v2, base, new_vl, vl);
+void test_vlseg3e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) {
+ return vlseg3e8ff_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8mf8(
+// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8mf8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
@@ -84,9 +84,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
@@ -99,13 +99,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg4e8ff_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_v_i8mf8(v0, v1, v2, v3, base, new_vl, vl);
+void test_vlseg4e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) {
+ return vlseg4e8ff_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_i8mf8(
+// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_i8mf8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1
@@ -120,9 +120,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
@@ -137,13 +137,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg5e8ff_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff_v_i8mf8(v0, v1, v2, v3, v4, base, new_vl, vl);
+void test_vlseg5e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) {
+ return vlseg5e8ff_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_i8mf8(
+// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_i8mf8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1
@@ -160,9 +160,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
@@ -179,13 +179,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg6e8ff_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff_v_i8mf8(v0, v1, v2, v3, v4, v5, base, new_vl, vl);
+void test_vlseg6e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) {
+ return vlseg6e8ff_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_i8mf8(
+// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_i8mf8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1
@@ -204,9 +204,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
@@ -225,13 +225,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg7e8ff_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl);
+void test_vlseg7e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) {
+ return vlseg7e8ff_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_i8mf8(
+// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_i8mf8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1
@@ -252,9 +252,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
@@ -275,13 +275,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg8e8ff_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl);
+void test_vlseg8e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) {
+ return vlseg8e8ff_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8mf4(
+// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
@@ -290,9 +290,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
@@ -301,13 +301,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg2e8ff_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_v_i8mf4(v0, v1, base, new_vl, vl);
+void test_vlseg2e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) {
+ return vlseg2e8ff_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8mf4(
+// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
//
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
@@ -318,9 +318,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
@@ -331,13 +331,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg3e8ff_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_v_i8mf4(v0, v1, v2, base, new_vl, vl);
+void test_vlseg3e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) {
+ return vlseg3e8ff_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8mf4(
+// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
@@ -350,9 +350,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
@@ -365,13 +365,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg4e8ff_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_v_i8mf4(v0, v1, v2, v3, base, new_vl, vl);
+void test_vlseg4e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) {
+ return vlseg4e8ff_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_i8mf4(
+// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_i8mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1
@@ -386,9 +386,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
@@ -403,13 +403,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg5e8ff_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff_v_i8mf4(v0, v1, v2, v3, v4, base, new_vl, vl);
+void test_vlseg5e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) {
+ return vlseg5e8ff_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_i8mf4(
+// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_i8mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1
@@ -426,9 +426,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv2i8.i64(i8*
[[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
@@ -445,13 +445,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg6e8ff_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff_v_i8mf4(v0, v1, v2, v3, v4, v5, base, new_vl, vl);
+void test_vlseg6e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) {
+ return vlseg6e8ff_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_i8mf4(
+// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_i8mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1
@@ -470,9 +470,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
@@ -491,13 +491,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg7e8ff_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl);
+void test_vlseg7e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) {
+ return vlseg7e8ff_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_i8mf4(
+// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_i8mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1
@@ -518,9 +518,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
@@ -541,13 +541,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg8e8ff_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl);
+void test_vlseg8e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) {
+ return vlseg8e8ff_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8mf2(
+// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]],
[[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -556,9 +556,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf2( +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -567,13 +567,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8ff_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8mf2(v0, v1, base, new_vl, vl); +void test_vlseg2e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8mf2( +// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -584,9 +584,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf2( +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -597,13 +597,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8ff_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8mf2(v0, v1, v2, base, new_vl, vl); +void test_vlseg3e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_i8mf2_m(v0, v1, v2, 
mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8mf2( +// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -616,9 +616,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf2( +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -631,13 +631,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8ff_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_i8mf2(v0, v1, v2, v3, base, new_vl, vl); +void test_vlseg4e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_i8mf2( +// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -652,9 +652,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf2( +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* 
[[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -669,13 +669,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8ff_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_i8mf2(v0, v1, v2, v3, v4, base, new_vl, vl); +void test_vlseg5e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_i8mf2( +// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -692,9 +692,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf2( +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -711,13 +711,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8ff_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_i8mf2(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +void test_vlseg6e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } -// CHECK-RV32-LABEL: 
@test_vlseg7e8ff_v_i8mf2( +// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -736,9 +736,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf2( +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -757,13 +757,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8ff_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +void test_vlseg7e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_i8mf2( +// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -784,9 +784,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf2( +// 
CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -807,13 +807,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8ff_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +void test_vlseg8e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8m1( +// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -822,9 +822,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m1( +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -833,13 +833,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8ff_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8m1(v0, v1, base, new_vl, vl); +void test_vlseg2e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t 
*base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8m1( +// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -850,9 +850,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m1( +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -863,13 +863,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8ff_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8m1(v0, v1, v2, base, new_vl, vl); +void test_vlseg3e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8m1( +// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -882,9 +882,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m1( +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -897,13 +897,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8ff_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_i8m1(v0, v1, v2, v3, base, new_vl, vl); +void test_vlseg4e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_i8m1( +// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -918,9 +918,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8m1( +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -935,13 +935,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8ff_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_i8m1(v0, v1, v2, v3, v4, base, new_vl, vl); +void test_vlseg5e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_i8m1( +// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -958,9 +958,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8m1( +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -977,13 +977,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8ff_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_i8m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +void test_vlseg6e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_i8m1( +// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -1002,9 +1002,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8m1( +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -1023,13 +1023,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8ff_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_i8m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +void test_vlseg7e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_i8m1( +// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -1050,9 +1050,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8m1( +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -1073,13 +1073,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8ff_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_i8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +void test_vlseg8e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) 
{ + return vlseg8e8ff_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8m2( +// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv16i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -1088,9 +1088,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m2( +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -1099,13 +1099,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8ff_v_i8m2 (vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8m2(v0, v1, base, new_vl, vl); +void test_vlseg2e8ff_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_i8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8m2( +// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv16i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -1116,9 +1116,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m2( +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } 
[[TMP0]], 1
@@ -1129,13 +1129,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg3e8ff_v_i8m2 (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_v_i8m2(v0, v1, v2, base, new_vl, vl);
+void test_vlseg3e8ff_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) {
+ return vlseg3e8ff_v_i8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8m2(
+// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv16i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
@@ -1148,9 +1148,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m2(
+// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
@@ -1163,13 +1163,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg4e8ff_v_i8m2 (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_v_i8m2(v0, v1, v2, v3, base, new_vl, vl);
+void test_vlseg4e8ff_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) {
+ return vlseg4e8ff_v_i8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8m4(
+// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv32i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
@@ -1178,9 +1178,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m4(
+// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv32i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
@@ -1189,13 +1189,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg2e8ff_v_i8m4 (vint8m4_t *v0, vint8m4_t *v1, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_v_i8m4(v0, v1, base, new_vl, vl);
+void test_vlseg2e8ff_v_i8m4_m (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) {
+ return vlseg2e8ff_v_i8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16mf4(
+// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
@@ -1204,9 +1204,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
@@ -1215,13 +1215,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg2e16ff_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_v_i16mf4(v0, v1, base, new_vl, vl);
+void test_vlseg2e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg2e16ff_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_i16mf4(
+// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_i16mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
@@ -1232,9 +1232,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
@@ -1245,13 +1245,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg3e16ff_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_v_i16mf4(v0, v1, v2, base, new_vl, vl);
+void test_vlseg3e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg3e16ff_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_i16mf4(
+// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_i16mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
@@ -1264,9 +1264,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
@@ -1279,13 +1279,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg4e16ff_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_v_i16mf4(v0, v1, v2, v3, base, new_vl, vl);
+void test_vlseg4e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg4e16ff_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_i16mf4(
+// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_i16mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1
@@ -1300,9 +1300,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
@@ -1317,13 +1317,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg5e16ff_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff_v_i16mf4(v0, v1, v2, v3, v4, base, new_vl, vl);
+void test_vlseg5e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg5e16ff_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_i16mf4(
+// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_i16mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1
@@ -1340,9 +1340,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
@@ -1359,13 +1359,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg6e16ff_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff_v_i16mf4(v0, v1, v2, v3, v4, v5, base, new_vl, vl);
+void test_vlseg6e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg6e16ff_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_i16mf4(
+// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_i16mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1
@@ -1384,9 +1384,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
@@ -1405,13 +1405,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg7e16ff_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl);
+void test_vlseg7e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg7e16ff_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_i16mf4(
+// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_i16mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1
@@ -1432,9 +1432,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
@@ -1455,13 +1455,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg8e16ff_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl);
+void test_vlseg8e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg8e16ff_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16mf2(
+// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
@@ -1470,9 +1470,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
@@ -1481,13 +1481,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg2e16ff_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_v_i16mf2(v0, v1, base, new_vl, vl);
+void test_vlseg2e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg2e16ff_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_i16mf2(
+// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_i16mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
@@ -1498,9 +1498,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
@@ -1511,13 +1511,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg3e16ff_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_v_i16mf2(v0, v1, v2, base, new_vl, vl);
+void test_vlseg3e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg3e16ff_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_i16mf2(
+// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_i16mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
@@ -1530,9 +1530,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
@@ -1545,13 +1545,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg4e16ff_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_v_i16mf2(v0, v1, v2, v3, base, new_vl, vl);
+void test_vlseg4e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg4e16ff_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_i16mf2(
+// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_i16mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1
@@ -1566,9 +1566,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
@@ -1583,13 +1583,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg5e16ff_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff_v_i16mf2(v0, v1, v2, v3, v4, base, new_vl, vl);
+void test_vlseg5e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg5e16ff_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_i16mf2(
+// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_i16mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1
@@ -1606,9 +1606,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
@@ -1625,13 +1625,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg6e16ff_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff_v_i16mf2(v0, v1, v2, v3, v4, v5, base, new_vl, vl);
+void test_vlseg6e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg6e16ff_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_i16mf2(
+// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_i16mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1
@@ -1650,9 +1650,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
@@ -1671,13 +1671,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg7e16ff_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl);
+void test_vlseg7e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg7e16ff_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_i16mf2(
+// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_i16mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1
@@ -1698,9 +1698,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
@@ -1721,13 +1721,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg8e16ff_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl);
+void test_vlseg8e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg8e16ff_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16m1(
+// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
@@ -1736,9 +1736,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m1(
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
@@ -1747,13 +1747,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg2e16ff_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_v_i16m1(v0, v1, base, new_vl, vl);
+void test_vlseg2e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg2e16ff_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_i16m1(
+// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_i16m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
@@ -1764,9 +1764,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m1(
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
@@ -1777,13 +1777,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg3e16ff_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_v_i16m1(v0, v1, v2, base, new_vl, vl);
+void test_vlseg3e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg3e16ff_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_i16m1(
+// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_i16m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
@@ -1796,9 +1796,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m1(
+// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
@@ -1811,13 +1811,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg4e16ff_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_v_i16m1(v0, v1, v2, v3, base, new_vl, vl);
+void test_vlseg4e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg4e16ff_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_i16m1(
+// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_i16m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1
@@ -1832,9 +1832,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16m1(
+// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
@@ -1849,13 +1849,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg5e16ff_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff_v_i16m1(v0, v1, v2, v3, v4, base, new_vl, vl);
+void test_vlseg5e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg5e16ff_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_i16m1(
+// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_i16m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1
@@ -1872,9 +1872,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16m1(
+// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
@@ -1891,13 +1891,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg6e16ff_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff_v_i16m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl);
+void test_vlseg6e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg6e16ff_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_i16m1(
+// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_i16m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1
@@ -1916,9 +1916,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16m1(
+// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
@@ -1937,13 +1937,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg7e16ff_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff_v_i16m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl);
+void test_vlseg7e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg7e16ff_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_i16m1(
+// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_i16m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1
@@ -1964,9 +1964,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16m1(
+// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
@@ -1987,13 +1987,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg8e16ff_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff_v_i16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl);
+void test_vlseg8e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg8e16ff_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16m2(
+// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv8i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
@@ -2002,9 +2002,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m2(
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
@@ -2013,13 +2013,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg2e16ff_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_v_i16m2(v0, v1, base, new_vl, vl);
+void test_vlseg2e16ff_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg2e16ff_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_i16m2(
+// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_i16m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv8i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
@@ -2030,9 +2030,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m2(
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
@@ -2043,13 +2043,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg3e16ff_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_v_i16m2(v0, v1, v2, base, new_vl, vl);
+void test_vlseg3e16ff_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg3e16ff_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_i16m2(
+// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_i16m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv8i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
@@ -2062,9 +2062,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m2(
+// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
@@ -2077,13 +2077,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg4e16ff_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_v_i16m2(v0, v1, v2, v3, base, new_vl, vl);
+void test_vlseg4e16ff_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg4e16ff_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16m4(
+// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv16i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
@@ -2092,9 +2092,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m4(
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv16i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
@@ -2103,13 +2103,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg2e16ff_v_i16m4 (vint16m4_t *v0, vint16m4_t *v1, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_v_i16m4(v0, v1, base, new_vl, vl);
+void test_vlseg2e16ff_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg2e16ff_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_i32mf2(
+// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_i32mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
@@ -2118,9 +2118,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
@@ -2129,13 +2129,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg2e32ff_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_v_i32mf2(v0, v1, base, new_vl, vl);
+void test_vlseg2e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) {
+ return vlseg2e32ff_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_i32mf2(
+// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_i32mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
@@ -2146,9 +2146,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
@@ -2159,13 +2159,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg3e32ff_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff_v_i32mf2(v0, v1, v2, base, new_vl, vl);
+void test_vlseg3e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) {
+ return vlseg3e32ff_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_i32mf2(
+// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_i32mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
@@ -2178,9 +2178,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
@@ -2193,13 +2193,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg4e32ff_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff_v_i32mf2(v0, v1, v2, v3, base, new_vl, vl);
+void test_vlseg4e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) {
+ return vlseg4e32ff_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_i32mf2(
+// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_i32mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1
@@ -2214,9 +2214,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
@@ -2231,13 +2231,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg5e32ff_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e32ff_v_i32mf2(v0,
v1, v2, v3, v4, base, new_vl, vl); +void test_vlseg5e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_i32mf2( +// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -2254,9 +2254,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32mf2( +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -2273,13 +2273,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32ff_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_i32mf2(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +void test_vlseg6e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_i32mf2( +// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // 
CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -2298,9 +2298,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32mf2( +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -2319,13 +2319,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32ff_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +void test_vlseg7e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_i32mf2( +// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -2346,9 +2346,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32mf2( +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* 
[[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -2369,13 +2369,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32ff_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +void test_vlseg8e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_i32m1( +// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -2384,9 +2384,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m1( +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -2395,13 +2395,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32ff_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_i32m1(v0, v1, base, new_vl, vl); +void test_vlseg2e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_i32m1( +// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: 
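// NOTE: illustrative usage sketch, not generated test content. It shows how a
// masked fault-only-first segment load such as vlseg2e32ff_v_i32m1_m above
// might be called; the helper name, the all-ones mask, and the zero maskedoff
// values are assumptions for the example, not part of this patch.
//
//   #include <riscv_vector.h>
//   void load_pairs(const int32_t *interleaved, size_t avl,
//                   vint32m1_t *x, vint32m1_t *y, size_t *loaded) {
//     size_t vl = vsetvl_e32m1(avl);
//     vbool32_t mask = vmset_m_b32(vl);       // enable every lane
//     vint32m1_t off = vmv_v_x_i32m1(0, vl);  // value for masked-off lanes
//     // *loaded receives the element count actually read before any fault.
//     vlseg2e32ff_v_i32m1_m(x, y, mask, off, off, interleaved, loaded, vl);
//   }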
-// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_i32m1(
+// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_i32m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
@@ -2412,9 +2412,9 @@
 // CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m1(
+// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
@@ -2425,13 +2425,13 @@
 // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg3e32ff_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, size_t *new_vl, size_t vl) {
-  return vlseg3e32ff_v_i32m1(v0, v1, v2, base, new_vl, vl);
+void test_vlseg3e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e32ff_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_i32m1(
+// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_i32m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
@@ -2444,9 +2444,9 @@
 // CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m1(
+// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
@@ -2459,13 +2459,13 @@
 // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg4e32ff_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, size_t *new_vl, size_t vl) {
-  return vlseg4e32ff_v_i32m1(v0, v1, v2, v3, base, new_vl, vl);
+void test_vlseg4e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e32ff_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_i32m1(
+// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_i32m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1
@@ -2480,9 +2480,9 @@
 // CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32m1(
+// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
@@ -2497,13 +2497,13 @@
 // CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg5e32ff_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, size_t *new_vl, size_t vl) {
-  return vlseg5e32ff_v_i32m1(v0, v1, v2, v3, v4, base, new_vl, vl);
+void test_vlseg5e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg5e32ff_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_i32m1(
+// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_i32m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1
@@ -2520,9 +2520,9 @@
 // CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32m1(
+// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
@@ -2539,13 +2539,13 @@
 // CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg6e32ff_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, size_t *new_vl, size_t vl) {
-  return vlseg6e32ff_v_i32m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl);
+void test_vlseg6e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg6e32ff_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_i32m1(
+// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_i32m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1
@@ -2564,9 +2564,9 @@
 // CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32m1(
+// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
@@ -2585,13 +2585,13 @@
 // CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg7e32ff_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, size_t *new_vl, size_t vl) {
-  return vlseg7e32ff_v_i32m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl);
+void test_vlseg7e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg7e32ff_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_i32m1(
+// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_i32m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1
@@ -2612,9 +2612,9 @@
 // CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32m1(
+// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
@@ -2635,13 +2635,13 @@
 // CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg8e32ff_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, size_t *new_vl, size_t vl) {
-  return vlseg8e32ff_v_i32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl);
+void test_vlseg8e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg8e32ff_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_i32m2(
+// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_i32m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
@@ -2650,9 +2650,9 @@
 // CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m2(
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
@@ -2661,13 +2661,13 @@
 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e32ff_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, size_t *new_vl, size_t vl) {
-  return vlseg2e32ff_v_i32m2(v0, v1, base, new_vl, vl);
+void test_vlseg2e32ff_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e32ff_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_i32m2(
+// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_i32m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv4i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
@@ -2678,9 +2678,9 @@
 // CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m2(
+// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
@@ -2691,13 +2691,13 @@
 // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg3e32ff_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, size_t *new_vl, size_t vl) {
-  return vlseg3e32ff_v_i32m2(v0, v1, v2, base, new_vl, vl);
+void test_vlseg3e32ff_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e32ff_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_i32m2(
+// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_i32m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv4i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
@@ -2710,9 +2710,9 @@
 // CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m2(
+// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
@@ -2725,13 +2725,13 @@
 // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg4e32ff_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, size_t *new_vl, size_t vl) {
-  return vlseg4e32ff_v_i32m2(v0, v1, v2, v3, base, new_vl, vl);
+void test_vlseg4e32ff_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e32ff_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_i32m4(
+// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_i32m4_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv8i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
@@ -2740,9 +2740,9 @@
 // CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m4(
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv8i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
@@ -2751,13 +2751,13 @@
 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e32ff_v_i32m4 (vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, size_t *new_vl, size_t vl) {
-  return vlseg2e32ff_v_i32m4(v0, v1, base, new_vl, vl);
+void test_vlseg2e32ff_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e32ff_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_i64m1(
+// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_i64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
@@ -2766,9 +2766,9 @@
 // CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m1(
+// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
@@ -2777,13 +2777,13 @@
 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e64ff_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, size_t *new_vl, size_t vl) {
-  return vlseg2e64ff_v_i64m1(v0, v1, base, new_vl, vl);
+void test_vlseg2e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e64ff_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_i64m1(
+// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_i64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
@@ -2794,9 +2794,9 @@
 // CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 8
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m1(
+// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
@@ -2807,13 +2807,13 @@
 // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg3e64ff_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, size_t *new_vl, size_t vl) {
-  return vlseg3e64ff_v_i64m1(v0, v1, v2, base, new_vl, vl);
+void test_vlseg3e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e64ff_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_i64m1(
+// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_i64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
@@ -2826,9 +2826,9 @@
 // CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 8
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m1(
+// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
@@ -2841,13 +2841,13 @@
 // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg4e64ff_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, size_t *new_vl, size_t vl) {
-  return vlseg4e64ff_v_i64m1(v0, v1, v2, v3, base, new_vl, vl);
+void test_vlseg4e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e64ff_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg5e64ff_v_i64m1(
+// CHECK-RV32-LABEL: @test_vlseg5e64ff_v_i64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1
@@ -2862,9 +2862,9 @@
 // CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 8
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_i64m1(
+// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
@@ -2879,13 +2879,13 @@
 // CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg5e64ff_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, size_t *new_vl, size_t vl) {
-  return vlseg5e64ff_v_i64m1(v0, v1, v2, v3, v4, base, new_vl, vl);
+void test_vlseg5e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t *new_vl, size_t vl) {
+  return vlseg5e64ff_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg6e64ff_v_i64m1(
+// CHECK-RV32-LABEL: @test_vlseg6e64ff_v_i64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1
@@ -2902,9 +2902,9 @@
 // CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 8
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_i64m1(
+// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
@@ -2921,13 +2921,13 @@
 // CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg6e64ff_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, size_t *new_vl, size_t vl) {
-  return vlseg6e64ff_v_i64m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl);
+void test_vlseg6e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t *new_vl, size_t vl) {
+  return vlseg6e64ff_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg7e64ff_v_i64m1(
+// CHECK-RV32-LABEL: @test_vlseg7e64ff_v_i64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1
@@ -2946,9 +2946,9 @@
 // CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 8
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_i64m1(
+// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
@@ -2967,13 +2967,13 @@
 // CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg7e64ff_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, size_t *new_vl, size_t vl) {
-  return vlseg7e64ff_v_i64m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl);
+void test_vlseg7e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t *new_vl, size_t vl) {
+  return vlseg7e64ff_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg8e64ff_v_i64m1(
+// CHECK-RV32-LABEL: @test_vlseg8e64ff_v_i64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1
@@ -2994,9 +2994,9 @@
 // CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 8
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_i64m1(
+// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
@@ -3017,13 +3017,13 @@
 // CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg8e64ff_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, size_t *new_vl, size_t vl) {
-  return vlseg8e64ff_v_i64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl);
+void test_vlseg8e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t *new_vl, size_t vl) {
+  return vlseg8e64ff_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_i64m2(
+// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_i64m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
@@ -3032,9 +3032,9 @@
 // CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m2(
+// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
@@ -3043,13 +3043,13 @@
 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e64ff_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, size_t *new_vl, size_t vl) {
-  return vlseg2e64ff_v_i64m2(v0, v1, base, new_vl, vl);
+void test_vlseg2e64ff_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e64ff_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_i64m2(
+// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_i64m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
@@ -3060,9 +3060,9 @@
 // CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 8
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m2(
+// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
@@ -3073,13 +3073,13 @@
 // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg3e64ff_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, size_t *new_vl, size_t vl) {
-  return vlseg3e64ff_v_i64m2(v0, v1, v2, base, new_vl, vl);
+void test_vlseg3e64ff_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e64ff_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
 }

-// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_i64m2(
+// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_i64m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
@@ -3092,9 +3092,9 @@
 // CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 8
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m2(
+// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
@@ -3107,13 +3107,13 @@
 // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg4e64ff_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, size_t *new_vl, size_t vl) {
-  return vlseg4e64ff_v_i64m2(v0, v1, v2, v3, base, new_vl, vl);
+void test_vlseg4e64ff_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e64ff_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
 }
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -3133,13 +3133,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64ff_v_i64m4 (vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_i64m4(v0, v1, base, new_vl, vl); +void test_vlseg2e64ff_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8mf8( +// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -3148,9 +3148,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf8( +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -3159,13 +3159,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8ff_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8mf8(v0, v1, base, new_vl, vl); +void test_vlseg2e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8mf8( +// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // 
CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -3176,9 +3176,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf8( +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -3189,13 +3189,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8ff_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8mf8(v0, v1, v2, base, new_vl, vl); +void test_vlseg3e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8mf8( +// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -3208,9 +3208,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf8( +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -3223,13 +3223,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8ff_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8mf8(v0, v1, v2, v3, base, new_vl, vl); +void test_vlseg4e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t 
maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_u8mf8( +// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -3244,9 +3244,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf8( +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -3261,13 +3261,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8ff_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_u8mf8(v0, v1, v2, v3, v4, base, new_vl, vl); +void test_vlseg5e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_u8mf8( +// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -3284,9 +3284,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf8( +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf8_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -3303,13 +3303,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8ff_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_u8mf8(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +void test_vlseg6e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_u8mf8( +// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -3328,9 +3328,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf8( +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -3349,13 +3349,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8ff_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_u8mf8(v0, v1, v2, 
v3, v4, v5, v6, base, new_vl, vl); +void test_vlseg7e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_u8mf8( +// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -3376,9 +3376,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf8( +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -3399,13 +3399,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8ff_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +void test_vlseg8e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8mf4( +// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } 
@llvm.riscv.vlseg2ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -3414,9 +3414,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf4( +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -3425,13 +3425,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8ff_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8mf4(v0, v1, base, new_vl, vl); +void test_vlseg2e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8mf4( +// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -3442,9 +3442,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf4( +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -3455,13 +3455,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8ff_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8mf4(v0, v1, v2, base, new_vl, vl); +void 
test_vlseg3e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8mf4( +// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -3474,9 +3474,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf4( +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -3489,13 +3489,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8ff_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8mf4(v0, v1, v2, v3, base, new_vl, vl); +void test_vlseg4e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_u8mf4( +// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -3510,9 +3510,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf4( +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , 
, , i64 } @llvm.riscv.vlseg5ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -3527,13 +3527,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8ff_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_u8mf4(v0, v1, v2, v3, v4, base, new_vl, vl); +void test_vlseg5e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_u8mf4( +// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -3550,9 +3550,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf4( +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -3569,13 +3569,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8ff_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_u8mf4(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +void test_vlseg6e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, 
vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_u8mf4( +// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -3594,9 +3594,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf4( +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -3615,13 +3615,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8ff_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +void test_vlseg7e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_u8mf4( +// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -3642,9 +3642,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf4( +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -3665,13 +3665,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8ff_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +void test_vlseg8e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8mf2( +// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -3680,9 +3680,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf2( +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -3691,13 +3691,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* 
[[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8ff_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8mf2(v0, v1, base, new_vl, vl); +void test_vlseg2e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8mf2( +// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -3708,9 +3708,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf2( +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -3721,13 +3721,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8ff_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8mf2(v0, v1, v2, base, new_vl, vl); +void test_vlseg3e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8mf2( +// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -3740,9 +3740,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf2( +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -3755,13 +3755,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8ff_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8mf2(v0, v1, v2, v3, base, new_vl, vl); +void test_vlseg4e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_u8mf2( +// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -3776,9 +3776,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf2( +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -3793,13 +3793,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8ff_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_u8mf2(v0, v1, v2, v3, v4, base, new_vl, vl); +void test_vlseg5e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_u8mf2( +// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -3816,9 +3816,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf2( +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -3835,13 +3835,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8ff_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_u8mf2(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +void test_vlseg6e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_u8mf2( +// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -3860,9 +3860,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf2( +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -3881,13 +3881,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8ff_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +void test_vlseg7e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_u8mf2( +// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -3908,9 +3908,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf2( +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -3931,13 +3931,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8ff_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t 
*v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +void test_vlseg8e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8m1( +// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -3946,9 +3946,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m1( +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -3957,13 +3957,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8ff_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8m1(v0, v1, base, new_vl, vl); +void test_vlseg2e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8m1( +// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -3974,9 +3974,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// 
CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m1( +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -3987,13 +3987,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8ff_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8m1(v0, v1, v2, base, new_vl, vl); +void test_vlseg3e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8m1( +// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -4006,9 +4006,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m1( +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -4021,13 +4021,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8ff_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8m1(v0, v1, v2, v3, base, new_vl, vl); +void test_vlseg4e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_u8m1( +// CHECK-RV32-LABEL: 
@test_vlseg5e8ff_v_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -4042,9 +4042,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8m1( +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -4059,13 +4059,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8ff_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_u8m1(v0, v1, v2, v3, v4, base, new_vl, vl); +void test_vlseg5e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_u8m1( +// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -4082,9 +4082,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8m1( +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -4101,13 +4101,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8ff_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_u8m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +void test_vlseg6e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_u8m1( +// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -4126,9 +4126,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8m1( +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -4147,13 +4147,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8ff_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_u8m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +void test_vlseg7e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t 
*base, size_t *new_vl, size_t vl) { + return vlseg7e8ff_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_u8m1( +// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -4174,9 +4174,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8m1( +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -4197,13 +4197,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8ff_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_u8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +void test_vlseg8e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8m2( +// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv16i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } 
[[TMP0]], 1
@@ -4212,9 +4212,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m2(
+// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } @llvm.riscv.vlseg2ff.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 1
@@ -4223,13 +4223,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg2e8ff_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_v_u8m2(v0, v1, base, new_vl, vl);
+void test_vlseg2e8ff_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) {
+ return vlseg2e8ff_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8m2(
+// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32 } @llvm.riscv.vlseg3ff.nxv16i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32 } [[TMP0]], 1
@@ -4240,9 +4240,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m2(
+// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } @llvm.riscv.vlseg3ff.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 1
@@ -4253,13 +4253,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg3e8ff_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_v_u8m2(v0, v1, v2, base, new_vl, vl);
+void test_vlseg3e8ff_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) {
+ return vlseg3e8ff_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8m2(
+// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32 } @llvm.riscv.vlseg4ff.nxv16i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], <vscale x 16 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32 } [[TMP0]], 1
@@ -4272,9 +4272,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m2(
+// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } @llvm.riscv.vlseg4ff.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], <vscale x 16 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 1
@@ -4287,13 +4287,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg4e8ff_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_v_u8m2(v0, v1, v2, v3, base, new_vl, vl);
+void test_vlseg4e8ff_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) {
+ return vlseg4e8ff_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8m4(
+// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8>, i32 } @llvm.riscv.vlseg2ff.nxv32i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8>, i32 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i32 } [[TMP0]], 1
@@ -4302,9 +4302,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m4(
+// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8>, i64 } @llvm.riscv.vlseg2ff.nxv32i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8>, i64 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i64 } [[TMP0]], 1
@@ -4313,13 +4313,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
// CHECK-RV64-NEXT: ret void
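Every hunk in this region applies the same mechanical change: the plain fault-only-first (ff) segment load becomes its masked `_m` variant, which takes a mask plus one masked-off vector per segment field, and the lowered call switches to the `.mask.` intrinsic with a trailing policy operand (the `i32 0`/`i64 0` above). A minimal caller sketch of the pattern under test, assuming the pre-`__riscv_`-prefix v0.10 intrinsic API this file exercises; `load_xy_pairs` and its parameter names are illustrative and not part of the test:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// De-interleave {x, y} byte pairs under a mask with fault-only-first
// semantics: a fault on element 0 traps as usual, while a fault on a
// later element truncates the load, and the count of elements actually
// loaded is written back through new_vl. Inactive lanes receive the
// corresponding fill (masked-off) values.
static void load_xy_pairs(const uint8_t *base, vbool2_t mask,
                          vuint8m4_t fill0, vuint8m4_t fill1,
                          vuint8m4_t *x, vuint8m4_t *y,
                          size_t *new_vl, size_t avl) {
  size_t vl = vsetvl_e8m4(avl); // request up to avl elements at e8, LMUL=4
  // Two-field masked ff segment load; lowers to
  // @llvm.riscv.vlseg2ff.mask.nxv32i8.*, as checked in the IR above.
  vlseg2e8ff_v_u8m4_m(x, y, mask, fill0, fill1, base, new_vl, vl);
}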
//
-void test_vlseg2e8ff_v_u8m4 (vuint8m4_t *v0, vuint8m4_t *v1, const uint8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_v_u8m4(v0, v1, base, new_vl, vl);
+void test_vlseg2e8ff_v_u8m4_m (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) {
+ return vlseg2e8ff_v_u8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16mf4(
+// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } @llvm.riscv.vlseg2ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 1
@@ -4328,9 +4328,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } @llvm.riscv.vlseg2ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 1
@@ -4339,13 +4339,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg2e16ff_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_v_u16mf4(v0, v1, base, new_vl, vl);
+void test_vlseg2e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg2e16ff_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_u16mf4(
+// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_u16mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } @llvm.riscv.vlseg3ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 1
@@ -4356,9 +4356,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } @llvm.riscv.vlseg3ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i64(
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -4369,13 +4369,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16ff_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_u16mf4(v0, v1, v2, base, new_vl, vl); +void test_vlseg3e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_u16mf4( +// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -4388,9 +4388,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf4( +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -4403,13 +4403,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16ff_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_u16mf4(v0, v1, v2, v3, base, new_vl, vl); +void test_vlseg4e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_u16mf4( +// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 
} @llvm.riscv.vlseg5ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -4424,9 +4424,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf4( +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -4441,13 +4441,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16ff_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_u16mf4(v0, v1, v2, v3, v4, base, new_vl, vl); +void test_vlseg5e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_u16mf4( +// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -4464,9 +4464,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf4( +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , 
i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -4483,13 +4483,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16ff_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_u16mf4(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +void test_vlseg6e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_u16mf4( +// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -4508,9 +4508,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf4( +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -4529,13 +4529,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16ff_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +void test_vlseg7e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t 
vl) { + return vlseg7e16ff_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_u16mf4( +// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -4556,9 +4556,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf4( +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -4579,13 +4579,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16ff_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +void test_vlseg8e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16mf2( +// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // 
CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 1
@@ -4594,9 +4594,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } @llvm.riscv.vlseg2ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 1
@@ -4605,13 +4605,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg2e16ff_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_v_u16mf2(v0, v1, base, new_vl, vl);
+void test_vlseg2e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg2e16ff_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}

-// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_u16mf2(
+// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_u16mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } @llvm.riscv.vlseg3ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 1
@@ -4622,9 +4622,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } @llvm.riscv.vlseg3ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 1
@@ -4635,13 +4635,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg3e16ff_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_v_u16mf2(v0, v1, v2, base, new_vl, vl);
+void test_vlseg3e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg3e16ff_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}

-//
CHECK-RV32-LABEL: @test_vlseg4e16ff_v_u16mf2( +// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -4654,9 +4654,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf2( +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -4669,13 +4669,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16ff_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_u16mf2(v0, v1, v2, v3, base, new_vl, vl); +void test_vlseg4e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_u16mf2( +// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -4690,9 +4690,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf2( +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -4707,13 +4707,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16ff_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_u16mf2(v0, v1, v2, v3, v4, base, new_vl, vl); +void test_vlseg5e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_u16mf2( +// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -4730,9 +4730,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf2( +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -4749,13 +4749,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16ff_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_u16mf2(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +void test_vlseg6e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_u16mf2( +// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -4774,9 +4774,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf2( +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -4795,13 +4795,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16ff_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +void test_vlseg7e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_u16mf2( +// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } 
[[TMP0]], 1 @@ -4822,9 +4822,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf2( +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -4845,13 +4845,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16ff_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +void test_vlseg8e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16m1( +// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -4860,9 +4860,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m1( +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -4871,13 +4871,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void 
test_vlseg2e16ff_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_u16m1(v0, v1, base, new_vl, vl); +void test_vlseg2e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_u16m1( +// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -4888,9 +4888,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m1( +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -4901,13 +4901,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16ff_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_u16m1(v0, v1, v2, base, new_vl, vl); +void test_vlseg3e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_u16m1( +// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -4920,9 +4920,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m1( +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } 
@llvm.riscv.vlseg4ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
@@ -4935,13 +4935,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg4e16ff_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_v_u16m1(v0, v1, v2, v3, base, new_vl, vl);
+void test_vlseg4e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg4e16ff_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_u16m1(
+// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_u16m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1
@@ -4956,9 +4956,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16m1(
+// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
@@ -4973,13 +4973,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg5e16ff_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e16ff_v_u16m1(v0, v1, v2, v3, v4, base, new_vl, vl);
+void test_vlseg5e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg5e16ff_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_u16m1(
+// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_u16m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1
@@ -4996,9 +4996,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16m1(
+// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
@@ -5015,13 +5015,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg6e16ff_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e16ff_v_u16m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl);
+void test_vlseg6e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg6e16ff_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_u16m1(
+// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_u16m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1
@@ -5040,9 +5040,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16m1(
+// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
@@ -5061,13 +5061,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg7e16ff_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e16ff_v_u16m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl);
+void test_vlseg7e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg7e16ff_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_u16m1(
+// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_u16m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1
@@ -5088,9 +5088,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16m1(
+// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
@@ -5111,13 +5111,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg8e16ff_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e16ff_v_u16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl);
+void test_vlseg8e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg8e16ff_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16m2(
+// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv8i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
@@ -5126,9 +5126,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m2(
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
@@ -5137,13 +5137,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg2e16ff_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_v_u16m2(v0, v1, base, new_vl, vl);
+void test_vlseg2e16ff_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg2e16ff_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_u16m2(
+// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_u16m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv8i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
@@ -5154,9 +5154,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m2(
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
@@ -5167,13 +5167,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg3e16ff_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e16ff_v_u16m2(v0, v1, v2, base, new_vl, vl);
+void test_vlseg3e16ff_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg3e16ff_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_u16m2(
+// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_u16m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv8i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
@@ -5186,9 +5186,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m2(
+// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
@@ -5201,13 +5201,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg4e16ff_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_v_u16m2(v0, v1, v2, v3, base, new_vl, vl);
+void test_vlseg4e16ff_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg4e16ff_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16m4(
+// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv16i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
@@ -5216,9 +5216,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m4(
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv16i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
@@ -5227,13 +5227,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg2e16ff_v_u16m4 (vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_v_u16m4(v0, v1, base, new_vl, vl);
+void test_vlseg2e16ff_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg2e16ff_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32mf2(
+// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
@@ -5242,9 +5242,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
@@ -5253,13 +5253,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg2e32ff_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_v_u32mf2(v0, v1, base, new_vl, vl);
+void test_vlseg2e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) {
+ return vlseg2e32ff_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_u32mf2(
+// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_u32mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
@@ -5270,9 +5270,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
@@ -5283,13 +5283,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg3e32ff_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff_v_u32mf2(v0, v1, v2, base, new_vl, vl);
+void test_vlseg3e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) {
+ return vlseg3e32ff_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_u32mf2(
+// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_u32mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
@@ -5302,9 +5302,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
@@ -5317,13 +5317,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg4e32ff_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff_v_u32mf2(v0, v1, v2, v3, base, new_vl, vl);
+void test_vlseg4e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) {
+ return vlseg4e32ff_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_u32mf2(
+// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_u32mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1
@@ -5338,9 +5338,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
@@ -5355,13 +5355,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg5e32ff_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e32ff_v_u32mf2(v0, v1, v2, v3, v4, base, new_vl, vl);
+void test_vlseg5e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl) {
+ return vlseg5e32ff_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_u32mf2(
+// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_u32mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1
@@ -5378,9 +5378,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
@@ -5397,13 +5397,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg6e32ff_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e32ff_v_u32mf2(v0, v1, v2, v3, v4, v5, base, new_vl, vl);
+void test_vlseg6e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl) {
+ return vlseg6e32ff_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_u32mf2(
+// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_u32mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1
@@ -5422,9 +5422,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
@@ -5443,13 +5443,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg7e32ff_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e32ff_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl);
+void test_vlseg7e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl) {
+ return vlseg7e32ff_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_u32mf2(
+// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_u32mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1
@@ -5470,9 +5470,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
@@ -5493,13 +5493,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg8e32ff_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e32ff_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl);
+void test_vlseg8e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl) {
+ return vlseg8e32ff_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32m1(
+// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
@@ -5508,9 +5508,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m1(
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
@@ -5519,13 +5519,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg2e32ff_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_v_u32m1(v0, v1, base, new_vl, vl);
+void test_vlseg2e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) {
+ return vlseg2e32ff_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_u32m1(
+// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_u32m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
@@ -5536,9 +5536,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m1(
+// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
@@ -5549,13 +5549,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg3e32ff_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff_v_u32m1(v0, v1, v2, base, new_vl, vl);
+void test_vlseg3e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) {
+ return vlseg3e32ff_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_u32m1(
+// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_u32m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
@@ -5568,9 +5568,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m1(
+// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
@@ -5583,13 +5583,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg4e32ff_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff_v_u32m1(v0, v1, v2, v3, base, new_vl, vl);
+void test_vlseg4e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) {
+ return vlseg4e32ff_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_u32m1(
+// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_u32m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1
@@ -5604,9 +5604,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32m1(
+// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
@@ -5621,13 +5621,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg5e32ff_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e32ff_v_u32m1(v0, v1, v2, v3, v4, base, new_vl, vl);
+void test_vlseg5e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl) {
+ return vlseg5e32ff_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_u32m1(
+// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_u32m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1
@@ -5644,9 +5644,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32m1(
+// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
@@ -5663,13 +5663,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg6e32ff_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e32ff_v_u32m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl);
+void test_vlseg6e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl) {
+ return vlseg6e32ff_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_u32m1(
+// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_u32m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1
@@ -5688,9 +5688,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32m1(
+// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
@@ -5709,13 +5709,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg7e32ff_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e32ff_v_u32m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl);
+void test_vlseg7e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl) {
+ return vlseg7e32ff_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_u32m1(
+// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_u32m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1
@@ -5736,9 +5736,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32m1(
+// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
@@ -5759,13 +5759,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg8e32ff_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e32ff_v_u32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl);
+void test_vlseg8e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl) {
+ return vlseg8e32ff_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32m2(
+// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
@@ -5774,9 +5774,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m2(
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
@@ -5785,13 +5785,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg2e32ff_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_v_u32m2(v0, v1, base, new_vl, vl);
+void test_vlseg2e32ff_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) {
+ return vlseg2e32ff_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_u32m2(
+// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_u32m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv4i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
@@ -5802,9 +5802,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m2(
+// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
@@ -5815,13 +5815,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg3e32ff_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff_v_u32m2(v0, v1, v2, base, new_vl, vl);
+void test_vlseg3e32ff_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) {
+ return vlseg3e32ff_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_u32m2(
+// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_u32m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv4i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
@@ -5834,9 +5834,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m2(
+// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
@@ -5849,13 +5849,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg4e32ff_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff_v_u32m2(v0, v1, v2, v3, base, new_vl, vl);
+void test_vlseg4e32ff_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) {
+ return vlseg4e32ff_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32m4(
+// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv8i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
@@ -5864,9 +5864,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m4(
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv8i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
@@ -5875,13 +5875,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg2e32ff_v_u32m4 (vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_v_u32m4(v0, v1, base, new_vl, vl);
+void test_vlseg2e32ff_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) {
+ return vlseg2e32ff_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_u64m1(
+// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_u64m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
@@ -5890,9 +5890,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m1(
+// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
@@ -5901,13 +5901,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg2e64ff_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff_v_u64m1(v0, v1, base, new_vl, vl);
+void test_vlseg2e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) {
+ return vlseg2e64ff_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_u64m1(
+// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_u64m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
@@ -5918,9 +5918,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 8
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m1(
+// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
@@ -5931,13 +5931,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg3e64ff_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e64ff_v_u64m1(v0, v1, v2, base, new_vl, vl);
+void test_vlseg3e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl) {
+ return vlseg3e64ff_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_u64m1(
+// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_u64m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
@@ -5950,9 +5950,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 8
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m1(
+// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
@@ -5965,13 +5965,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg4e64ff_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e64ff_v_u64m1(v0, v1, v2, v3, base, new_vl, vl);
+void test_vlseg4e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl) {
+ return vlseg4e64ff_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg5e64ff_v_u64m1(
+// CHECK-RV32-LABEL: @test_vlseg5e64ff_v_u64m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1
@@ -5986,9 +5986,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 8
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_u64m1(
+// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
@@ -6003,13 +6003,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg5e64ff_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e64ff_v_u64m1(v0, v1, v2, v3, v4, base, new_vl, vl);
+void test_vlseg5e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t *new_vl, size_t vl) {
+ return vlseg5e64ff_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg6e64ff_v_u64m1(
+// CHECK-RV32-LABEL: @test_vlseg6e64ff_v_u64m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1
@@ -6026,9 +6026,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 8
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_u64m1(
+// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
@@ -6045,13 +6045,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg6e64ff_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e64ff_v_u64m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl);
+void test_vlseg6e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t *new_vl, size_t vl) {
+ return vlseg6e64ff_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
}
-// CHECK-RV32-LABEL: @test_vlseg7e64ff_v_u64m1(
+// CHECK-RV32-LABEL: @test_vlseg7e64ff_v_u64m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1
@@ -6070,9 +6070,9 @@
// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 8
// CHECK-RV32-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_u64m1(
+// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
@@ -6091,13 +6091,13 @@
// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void
test_vlseg7e64ff_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg7e64ff_v_u64m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +void test_vlseg7e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg7e64ff_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e64ff_v_u64m1( +// CHECK-RV32-LABEL: @test_vlseg8e64ff_v_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -6118,9 +6118,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 8 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_u64m1( +// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -6141,13 +6141,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e64ff_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg8e64ff_v_u64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +void test_vlseg8e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg8e64ff_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_u64m2( +// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -6156,9 +6156,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m2( +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -6167,13 +6167,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64ff_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_u64m2(v0, v1, base, new_vl, vl); +void test_vlseg2e64ff_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_u64m2( +// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -6184,9 +6184,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 8 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m2( +// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -6197,13 +6197,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* 
[[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e64ff_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_u64m2(v0, v1, v2, base, new_vl, vl); +void test_vlseg3e64ff_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg3e64ff_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_u64m2( +// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -6216,9 +6216,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 8 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m2( +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -6231,13 +6231,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64ff_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_u64m2(v0, v1, v2, v3, base, new_vl, vl); +void test_vlseg4e64ff_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg4e64ff_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_u64m4( +// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -6246,9 +6246,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8 // 
CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m4( +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -6257,13 +6257,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64ff_v_u64m4 (vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_u64m4(v0, v1, base, new_vl, vl); +void test_vlseg2e64ff_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_f32mf2( +// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -6272,9 +6272,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32mf2( +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -6283,13 +6283,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32ff_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_f32mf2(v0, v1, base, new_vl, vl); +void test_vlseg2e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_f32mf2( +// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } 
@llvm.riscv.vlseg3ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -6300,9 +6300,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32mf2( +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -6313,13 +6313,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32ff_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_f32mf2(v0, v1, v2, base, new_vl, vl); +void test_vlseg3e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_f32mf2( +// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -6332,9 +6332,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32mf2( +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -6347,13 +6347,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32ff_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, 
vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_f32mf2(v0, v1, v2, v3, base, new_vl, vl); +void test_vlseg4e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_f32mf2( +// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -6368,9 +6368,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32mf2( +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -6385,13 +6385,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32ff_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_f32mf2(v0, v1, v2, v3, v4, base, new_vl, vl); +void test_vlseg5e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_f32mf2( +// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , 
, , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -6408,9 +6408,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32mf2( +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -6427,13 +6427,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32ff_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_f32mf2(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +void test_vlseg6e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_f32mf2( +// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -6452,9 +6452,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32mf2( +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -6473,13 +6473,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32ff_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +void test_vlseg7e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_f32mf2( +// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -6500,9 +6500,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32mf2( +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -6523,13 +6523,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32ff_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +void test_vlseg8e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, 
vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_f32m1( +// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -6538,9 +6538,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m1( +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -6549,13 +6549,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32ff_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_f32m1(v0, v1, base, new_vl, vl); +void test_vlseg2e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_f32m1( +// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -6566,9 +6566,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m1( +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -6579,13 +6579,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32ff_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_f32m1(v0, v1, v2, base, new_vl, vl); +void test_vlseg3e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_f32m1( +// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -6598,9 +6598,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m1( +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -6613,13 +6613,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32ff_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_f32m1(v0, v1, v2, v3, base, new_vl, vl); +void test_vlseg4e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_f32m1( +// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2f32.i32( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -6634,9 +6634,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32m1( +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -6651,13 +6651,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32ff_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_f32m1(v0, v1, v2, v3, v4, base, new_vl, vl); +void test_vlseg5e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_f32m1( +// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -6674,9 +6674,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32m1( +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -6693,13 +6693,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32ff_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_f32m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +void test_vlseg6e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_f32m1( +// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -6718,9 +6718,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32m1( +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -6739,13 +6739,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32ff_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +void test_vlseg7e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff_v_f32m1_m(v0, v1, v2, v3, v4, v5, 
v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_f32m1( +// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -6766,9 +6766,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32m1( +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -6789,13 +6789,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32ff_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +void test_vlseg8e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_f32m2( +// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 
1 @@ -6804,9 +6804,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m2( +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -6815,13 +6815,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32ff_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_f32m2(v0, v1, base, new_vl, vl); +void test_vlseg2e32ff_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_f32m2( +// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv4f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -6832,9 +6832,9 @@ // CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 // CHECK-RV32-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m2( +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv4f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -6845,13 +6845,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32ff_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_f32m2(v0, v1, v2, base, new_vl, vl); +void test_vlseg3e32ff_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_f32m2( +// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_f32m2_m( 
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv4f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
@@ -6864,9 +6864,9 @@
 // CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m2(
+// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv4f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
@@ -6879,13 +6879,13 @@
 // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg4e32ff_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff_v_f32m2(v0, v1, v2, v3, base, new_vl, vl);
+void test_vlseg4e32ff_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t *new_vl, size_t vl) {
+ return vlseg4e32ff_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_f32m4(
+// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_f32m4_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv8f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
@@ -6894,9 +6894,9 @@
 // CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m4(
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv8f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
@@ -6905,13 +6905,13 @@
 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e32ff_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_v_f32m4(v0, v1, base, new_vl, vl);
+void test_vlseg2e32ff_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t *new_vl, size_t vl) {
+ return vlseg2e32ff_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_f64m1(
+// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_f64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
@@ -6920,9 +6920,9 @@
 // CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m1(
+// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
@@ -6931,13 +6931,13 @@
 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e64ff_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff_v_f64m1(v0, v1, base, new_vl, vl);
+void test_vlseg2e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t *new_vl, size_t vl) {
+ return vlseg2e64ff_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_f64m1(
+// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_f64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
@@ -6948,9 +6948,9 @@
 // CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 8
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m1(
+// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
@@ -6961,13 +6961,13 @@
 // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg3e64ff_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, size_t *new_vl, size_t vl) {
- return vlseg3e64ff_v_f64m1(v0, v1, v2, base, new_vl, vl);
+void test_vlseg3e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t *new_vl, size_t vl) {
+ return vlseg3e64ff_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_f64m1(
+// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_f64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
@@ -6980,9 +6980,9 @@
 // CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 8
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m1(
+// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
@@ -6995,13 +6995,13 @@
 // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg4e64ff_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, size_t *new_vl, size_t vl) {
- return vlseg4e64ff_v_f64m1(v0, v1, v2, v3, base, new_vl, vl);
+void test_vlseg4e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t *new_vl, size_t vl) {
+ return vlseg4e64ff_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg5e64ff_v_f64m1(
+// CHECK-RV32-LABEL: @test_vlseg5e64ff_v_f64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1
@@ -7016,9 +7016,9 @@
 // CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 8
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_f64m1(
+// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
@@ -7033,13 +7033,13 @@
 // CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg5e64ff_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, size_t *new_vl, size_t vl) {
- return vlseg5e64ff_v_f64m1(v0, v1, v2, v3, v4, base, new_vl, vl);
+void test_vlseg5e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t *new_vl, size_t vl) {
+ return vlseg5e64ff_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg6e64ff_v_f64m1(
+// CHECK-RV32-LABEL: @test_vlseg6e64ff_v_f64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1
@@ -7056,9 +7056,9 @@
 // CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 8
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_f64m1(
+// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
@@ -7075,13 +7075,13 @@
 // CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg6e64ff_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, size_t *new_vl, size_t vl) {
- return vlseg6e64ff_v_f64m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl);
+void test_vlseg6e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t *new_vl, size_t vl) {
+ return vlseg6e64ff_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg7e64ff_v_f64m1(
+// CHECK-RV32-LABEL: @test_vlseg7e64ff_v_f64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1
@@ -7100,9 +7100,9 @@
 // CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 8
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_f64m1(
+// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
@@ -7121,13 +7121,13 @@
 // CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg7e64ff_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, size_t *new_vl, size_t vl) {
- return vlseg7e64ff_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl);
+void test_vlseg7e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t *new_vl, size_t vl) {
+ return vlseg7e64ff_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg8e64ff_v_f64m1(
+// CHECK-RV32-LABEL: @test_vlseg8e64ff_v_f64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1
@@ -7148,9 +7148,9 @@
 // CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 8
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_f64m1(
+// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
@@ -7171,13 +7171,13 @@
 // CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg8e64ff_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, size_t *new_vl, size_t vl) {
- return vlseg8e64ff_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl);
+void test_vlseg8e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t *new_vl, size_t vl) {
+ return vlseg8e64ff_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_f64m2(
+// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_f64m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
@@ -7186,9 +7186,9 @@
 // CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m2(
+// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
@@ -7197,13 +7197,13 @@
 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e64ff_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff_v_f64m2(v0, v1, base, new_vl, vl);
+void test_vlseg2e64ff_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t *new_vl, size_t vl) {
+ return vlseg2e64ff_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_f64m2(
+// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_f64m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
@@ -7214,9 +7214,9 @@
 // CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 8
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m2(
+// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
@@ -7227,13 +7227,13 @@
 // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg3e64ff_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, size_t *new_vl, size_t vl) {
- return vlseg3e64ff_v_f64m2(v0, v1, v2, base, new_vl, vl);
+void test_vlseg3e64ff_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t *new_vl, size_t vl) {
+ return vlseg3e64ff_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_f64m2(
+// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_f64m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
@@ -7246,9 +7246,9 @@
 // CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 8
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m2(
+// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
@@ -7261,13 +7261,13 @@
 // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg4e64ff_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, size_t *new_vl, size_t vl) {
- return vlseg4e64ff_v_f64m2(v0, v1, v2, v3, base, new_vl, vl);
+void test_vlseg4e64ff_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t *new_vl, size_t vl) {
+ return vlseg4e64ff_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_f64m4(
+// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_f64m4_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
@@ -7276,9 +7276,9 @@
 // CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8
 // CHECK-RV32-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m4(
+// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
@@ -7287,8200 +7287,8 @@
 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e64ff_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff_v_f64m4(v0, v1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8mf8_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8mf8_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8mf8_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_i8mf8_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg5e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_i8mf8_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg6e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_i8mf8_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg7e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_i8mf8_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8
-// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8
-// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg8e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e8ff_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e8ff_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_i8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg5e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e8ff_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_i8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg6e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e8ff_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_i8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg7e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e8ff_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_i8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8
-// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8
-// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg8e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e8ff_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e8ff_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store 
[[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_i8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_i8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , 
, , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_i8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , 
, i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_i8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], 
[[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t *new_vl, 
size_t vl) { - return vlseg8e8ff_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* 
[[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_i8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// 
CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_i8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_i8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_i8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1 -// 
CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e8ff_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e8ff_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// 
CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e8ff_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_i8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e8ff_v_i8m4_m (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// 
CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* 
[[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } 
@llvm.riscv.vlseg5ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: 
store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , 
, , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_i16mf2_m(v0, v1, mask, 
maskedoff0, maskedoff1, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 -// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, 
vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t *new_vl, 
size_t vl) { - return vlseg7e16ff_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } 
[[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } 
@llvm.riscv.vlseg5ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } 
[[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , 
, , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , 
, i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e16ff_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_i16m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e16ff_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_i16m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// 
CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16>* [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], <vscale x 8 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e16ff_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e16ff_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
-}
-
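The segment counts narrow as LMUL grows because NFIELDS x LMUL may not exceed 8 vector registers: the m2 types stop at vlseg4 above, and the m4 types below only get vlseg2. A sketch of the two-field m4 form follows; it is illustrative only, not part of the test file, and the caller names are invented:

    #include <riscv_vector.h>
    #include <stddef.h>

    /* Deinterleave {x, y} int16 pairs, using a whole four-register (m4)
     * group per field; stops early on a fault past the first pair. */
    void load_xy_pairs(const int16_t *interleaved, size_t vl, vbool4_t mask,
                       vint16m4_t offx, vint16m4_t offy) {
      vint16m4_t x, y;
      size_t new_vl;
      vlseg2e16ff_v_i16m4_m(&x, &y, mask, offx, offy, interleaved, &new_vl, vl);
      (void)x; (void)y; /* fields would be consumed here */
    }
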
-void test_vlseg2e16ff_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e16ff_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_i32mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_i32mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_i32mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_i32mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg5e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e32ff_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_i32mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg6e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e32ff_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_i32mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg7e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e32ff_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_i32mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8
-// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8
-// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg8e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e32ff_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_i32m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_i32m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_i32m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_i32m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg5e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e32ff_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_i32m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg6e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg6e32ff_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_i32m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg7e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg7e32ff_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_i32m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8
-// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8
-// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg8e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg8e32ff_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_i32m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e32ff_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_i32m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e32ff_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_i32m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e32ff_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_i32m4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e32ff_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_i64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_i64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e64ff_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_i64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e64ff_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e64ff_v_i64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg5e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t *new_vl, size_t vl) {
- return vlseg5e64ff_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e64ff_v_i64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg6e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t *new_vl, size_t vl) {
vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg6e64ff_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e64ff_v_i64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, 
vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg7e64ff_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e64ff_v_i64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], 
align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg8e64ff_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_i64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e64ff_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_i64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m2_m( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e64ff_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_i64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e64ff_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_i64m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e64ff_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } 
@llvm.riscv.vlseg3ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_u8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { - return 
vlseg5e8ff_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_u8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_u8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } 
@llvm.riscv.vlseg7ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: 
@test_vlseg8e8ff_v_u8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, 
vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } 
[[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_u8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , 
, , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_u8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = 
extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_u8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// 
CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_u8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 
1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// 
CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } 
[[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg4e8ff_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_u8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]],
0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg5e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg5e8ff_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_u8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:
[[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg6e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg6e8ff_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_u8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store
[[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg7e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg7e8ff_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_u8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8
-// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 }
[[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8
-// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg8e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg8e8ff_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg2e8ff_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg3e8ff_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] =
extractvalue { , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg4e8ff_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_u8m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg5e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg5e8ff_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_u8m1_m(
-//
CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg6e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg6e8ff_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_u8m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32
0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg7e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg7e8ff_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_u8m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]],
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8
-// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8
-// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg8e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6,
vuint8m1_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg8e8ff_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e8ff_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg2e8ff_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 }
[[TMP0]], 3
-// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e8ff_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg3e8ff_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e8ff_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg4e8ff_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8m4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1
-//
CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e8ff_v_u8m4_m (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg2e8ff_v_u8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg2e16ff_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_u16mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2
-//
CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg3e16ff_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_u16mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t
*base, size_t *new_vl, size_t vl) {
-  return vlseg4e16ff_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_u16mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg5e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg5e16ff_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_u16mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:
[[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg6e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg6e16ff_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_u16mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], *
[[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg7e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg7e16ff_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_u16mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1
-//
-// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], <vscale x 1 x i16> [[MASKEDOFF6:%.*]], <vscale x 1 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP8]], <vscale x 1 x i16>* [[V7:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 8
-// CHECK-RV64-NEXT:    store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg8e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg8e16ff_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg2e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg2e16ff_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_u16mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg3e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg3e16ff_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_u16mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg4e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg4e16ff_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_u16mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT:    store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg5e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg5e16ff_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_u16mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT:    store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg6e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg6e16ff_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_u16mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT:    store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg7e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg7e16ff_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_u16mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], <vscale x 2 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16>* [[V7:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 8
-// CHECK-RV32-NEXT:    store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], <vscale x 2 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16>* [[V7:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 8
-// CHECK-RV64-NEXT:    store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg8e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg8e16ff_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg2e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg2e16ff_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_u16m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg3e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg3e16ff_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_u16m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg4e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg4e16ff_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_u16m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT:    store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg5e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg5e16ff_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_u16m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT:    store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg6e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg6e16ff_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_u16m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], <vscale x 4 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP7]], <vscale x 4 x i16>* [[V6:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT:    store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], <vscale x 4 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP7]], <vscale x 4 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg7e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg7e16ff_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_u16m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], <vscale x 4 x i16> [[MASKEDOFF6:%.*]], <vscale x 4 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP7]], <vscale x 4 x i16>* [[V6:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP8]], <vscale x 4 x i16>* [[V7:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 8
-// CHECK-RV32-NEXT:    store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], <vscale x 4 x i16> [[MASKEDOFF6:%.*]], <vscale x 4 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP7]], <vscale x 4 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP8]], <vscale x 4 x i16>* [[V7:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 8
-// CHECK-RV64-NEXT:    store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg8e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg8e16ff_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16m2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg2e16ff_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg2e16ff_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_u16m2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg3e16ff_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg3e16ff_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_u16m2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], <vscale x 8 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16>* [[V3:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], <vscale x 8 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg4e16ff_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg4e16ff_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16m4_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16>, i32 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF0:%.*]], <vscale x 16 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 16 x i16> [[TMP1]], <vscale x 16 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16>, i64 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF0:%.*]], <vscale x 16 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP1]], <vscale x 16 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg2e16ff_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg2e16ff_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg2e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return vlseg2e32ff_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_u32mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg3e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return vlseg3e32ff_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_u32mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg4e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return vlseg4e32ff_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_u32mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT:    store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 5
CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, 
vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, 
vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store 
[[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret 
void -// -// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) { - 
return vlseg4e32ff_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } 
[[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { 
, , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i32( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e32ff_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_u32m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e32ff_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_u32m2_m(v0, v1, v2, mask, maskedoff0, 
maskedoff1, maskedoff2, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_u32m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e32ff_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } 
[[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e32ff_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue 
{ , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e64ff_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg5e64ff_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e64ff_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 
8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg6e64ff_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e64ff_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store 
[[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg7e64ff_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e64ff_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 -// 
CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg8e64ff_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_u64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * 
-// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_u64m2_m(
-// [CHECK-RV32 body: @llvm.riscv.vlseg2ff.mask.nxv2i64.i32 call; segment results stored to V0-V1, returned vl to NEW_VL]
-//
-// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m2_m(
-// [CHECK-RV64 body: @llvm.riscv.vlseg2ff.mask.nxv2i64.i64 call; segment results stored to V0-V1, returned vl to NEW_VL]
-//
-void test_vlseg2e64ff_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_u64m2_m(
-// [CHECK-RV32 body: @llvm.riscv.vlseg3ff.mask.nxv2i64.i32 call; segment results stored to V0-V2, returned vl to NEW_VL]
-//
-// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m2_m(
-// [CHECK-RV64 body: @llvm.riscv.vlseg3ff.mask.nxv2i64.i64 call; segment results stored to V0-V2, returned vl to NEW_VL]
-//
-void test_vlseg3e64ff_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg3e64ff_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_u64m2_m(
-// [CHECK-RV32 body: @llvm.riscv.vlseg4ff.mask.nxv2i64.i32 call; segment results stored to V0-V3, returned vl to NEW_VL]
-//
-// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m2_m(
-// [CHECK-RV64 body: @llvm.riscv.vlseg4ff.mask.nxv2i64.i64 call; segment results stored to V0-V3, returned vl to NEW_VL]
-//
-void test_vlseg4e64ff_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg4e64ff_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_u64m4_m(
-// [CHECK-RV32 body: @llvm.riscv.vlseg2ff.mask.nxv4i64.i32 call; segment results stored to V0-V1, returned vl to NEW_VL]
-//
-// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m4_m(
-// [CHECK-RV64 body: @llvm.riscv.vlseg2ff.mask.nxv4i64.i64 call; segment results stored to V0-V1, returned vl to NEW_VL]
-//
-void test_vlseg2e64ff_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_f32mf2_m(
-// [CHECK-RV32 body: @llvm.riscv.vlseg2ff.mask.nxv1f32.i32 call; segment results stored to V0-V1, returned vl to NEW_VL]
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32mf2_m(
-// [CHECK-RV64 body: @llvm.riscv.vlseg2ff.mask.nxv1f32.i64 call; segment results stored to V0-V1, returned vl to NEW_VL]
-//
-void test_vlseg2e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_f32mf2_m(
-// [CHECK-RV32 body: @llvm.riscv.vlseg3ff.mask.nxv1f32.i32 call; segment results stored to V0-V2, returned vl to NEW_VL]
-//
-// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32mf2_m(
-// [CHECK-RV64 body: @llvm.riscv.vlseg3ff.mask.nxv1f32.i64 call; segment results stored to V0-V2, returned vl to NEW_VL]
-//
-void test_vlseg3e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_f32mf2_m(
-// [CHECK-RV32 body: @llvm.riscv.vlseg4ff.mask.nxv1f32.i32 call; segment results stored to V0-V3, returned vl to NEW_VL]
-//
-// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32mf2_m(
-// [CHECK-RV64 body: @llvm.riscv.vlseg4ff.mask.nxv1f32.i64 call; segment results stored to V0-V3, returned vl to NEW_VL]
-//
-void test_vlseg4e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_f32mf2_m(
-// [CHECK-RV32 body: @llvm.riscv.vlseg5ff.mask.nxv1f32.i32 call; segment results stored to V0-V4, returned vl to NEW_VL]
-//
-// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32mf2_m(
-// [CHECK-RV64 body: @llvm.riscv.vlseg5ff.mask.nxv1f32.i64 call; segment results stored to V0-V4, returned vl to NEW_VL]
-//
-void test_vlseg5e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t *new_vl, size_t vl) {
- return vlseg5e32ff_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_f32mf2_m(
-// [CHECK-RV32 body: @llvm.riscv.vlseg6ff.mask.nxv1f32.i32 call; segment results stored to V0-V5, returned vl to NEW_VL]
-//
-// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32mf2_m(
-// [CHECK-RV64 body: @llvm.riscv.vlseg6ff.mask.nxv1f32.i64 call; segment results stored to V0-V5, returned vl to NEW_VL]
-//
-void test_vlseg6e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t *new_vl, size_t vl) {
- return vlseg6e32ff_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_f32mf2_m(
-// [CHECK-RV32 body: @llvm.riscv.vlseg7ff.mask.nxv1f32.i32 call; segment results stored to V0-V6, returned vl to NEW_VL]
-//
-// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32mf2_m(
-// [CHECK-RV64 body: @llvm.riscv.vlseg7ff.mask.nxv1f32.i64 call; segment results stored to V0-V6, returned vl to NEW_VL]
-//
-void test_vlseg7e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t *new_vl, size_t vl) {
- return vlseg7e32ff_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_f32mf2_m(
-// [CHECK-RV32 body: @llvm.riscv.vlseg8ff.mask.nxv1f32.i32 call; segment results stored to V0-V7, returned vl to NEW_VL]
-//
-// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32mf2_m(
-// [CHECK-RV64 body: @llvm.riscv.vlseg8ff.mask.nxv1f32.i64 call; segment results stored to V0-V7, returned vl to NEW_VL]
-//
-void test_vlseg8e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t *new_vl, size_t vl) {
- return vlseg8e32ff_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_f32m1_m(
-// [CHECK-RV32 body: @llvm.riscv.vlseg2ff.mask.nxv2f32.i32 call; segment results stored to V0-V1, returned vl to NEW_VL]
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m1_m(
-// [CHECK-RV64 body: @llvm.riscv.vlseg2ff.mask.nxv2f32.i64 call; segment results stored to V0-V1, returned vl to NEW_VL]
-//
-void test_vlseg2e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_f32m1_m(
-// [CHECK-RV32 body: @llvm.riscv.vlseg3ff.mask.nxv2f32.i32 call; segment results stored to V0-V2, returned vl to NEW_VL]
-//
-// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m1_m(
-// [CHECK-RV64 body: @llvm.riscv.vlseg3ff.mask.nxv2f32.i64 call; segment results stored to V0-V2, returned vl to NEW_VL]
-//
-void test_vlseg3e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_f32m1_m(
-// [CHECK-RV32 body: @llvm.riscv.vlseg4ff.mask.nxv2f32.i32 call; segment results stored to V0-V3, returned vl to NEW_VL]
-//
-// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m1_m(
-// [CHECK-RV64 body: @llvm.riscv.vlseg4ff.mask.nxv2f32.i64 call; segment results stored to V0-V3, returned vl to NEW_VL]
-//
-void test_vlseg4e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_f32m1_m(
-// [CHECK-RV32 body: @llvm.riscv.vlseg5ff.mask.nxv2f32.i32 call; segment results stored to V0-V4, returned vl to NEW_VL]
-//
-// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32m1_m(
-// [CHECK-RV64 body: @llvm.riscv.vlseg5ff.mask.nxv2f32.i64 call; segment results stored to V0-V4, returned vl to NEW_VL]
-//
-void test_vlseg5e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t *new_vl, size_t vl) {
- return vlseg5e32ff_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_f32m1_m(
-// [CHECK-RV32 body: @llvm.riscv.vlseg6ff.mask.nxv2f32.i32 call; segment results stored to V0-V5, returned vl to NEW_VL]
-//
-// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32m1_m(
-// [CHECK-RV64 body: @llvm.riscv.vlseg6ff.mask.nxv2f32.i64 call; segment results stored to V0-V5, returned vl to NEW_VL]
-//
-void test_vlseg6e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t *new_vl, size_t vl) {
- return vlseg6e32ff_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_f32m1_m(
-// [CHECK-RV32 body: @llvm.riscv.vlseg7ff.mask.nxv2f32.i32 call; segment results stored to V0-V6, returned vl to NEW_VL]
-//
-// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32m1_m(
-// [CHECK-RV64 body: @llvm.riscv.vlseg7ff.mask.nxv2f32.i64 call; segment results stored to V0-V6, returned vl to NEW_VL]
-//
-void test_vlseg7e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t *new_vl, size_t vl) {
- return vlseg7e32ff_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_f32m1_m(
-// [CHECK-RV32 body: @llvm.riscv.vlseg8ff.mask.nxv2f32.i32 call; segment results stored to V0-V7, returned vl to NEW_VL]
-//
-// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32m1_m(
-// [CHECK-RV64 body: @llvm.riscv.vlseg8ff.mask.nxv2f32.i64 call; segment results stored to V0-V7, returned vl to NEW_VL]
-//
-void test_vlseg8e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t *new_vl, size_t vl) {
- return vlseg8e32ff_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_f32m2_m(
-// [CHECK-RV32 body: @llvm.riscv.vlseg2ff.mask.nxv4f32.i32 call; segment results stored to V0-V1, returned vl to NEW_VL]
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m2_m(
-// [CHECK-RV64 body: @llvm.riscv.vlseg2ff.mask.nxv4f32.i64 call; segment results stored to V0-V1, returned vl to NEW_VL]
-//
-void test_vlseg2e32ff_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_f32m2_m(
-// [CHECK-RV32 body: @llvm.riscv.vlseg3ff.mask.nxv4f32.i32 call; segment results stored to V0-V2, returned vl to NEW_VL]
-//
-// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m2_m(
-// [CHECK-RV64 body: @llvm.riscv.vlseg3ff.mask.nxv4f32.i64 call; segment results stored to V0-V2, returned vl to NEW_VL]
-//
-void test_vlseg3e32ff_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t *new_vl, size_t vl) {
- return vlseg3e32ff_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_f32m2_m(
-// [CHECK-RV32 body: @llvm.riscv.vlseg4ff.mask.nxv4f32.i32 call; segment results stored to V0-V3, returned vl to NEW_VL]
-//
-// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m2_m(
-// [CHECK-RV64 body: @llvm.riscv.vlseg4ff.mask.nxv4f32.i64 call; segment results stored to V0-V3, returned vl to NEW_VL]
-//
-void test_vlseg4e32ff_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t *new_vl, size_t vl) {
- return vlseg4e32ff_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_f32m4_m(
-// [CHECK-RV32 body: @llvm.riscv.vlseg2ff.mask.nxv8f32.i32 call; segment results stored to V0-V1, returned vl to NEW_VL]
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m4_m(
-// [CHECK-RV64 body: @llvm.riscv.vlseg2ff.mask.nxv8f32.i64 call; segment results stored to V0-V1, returned vl to NEW_VL]
-//
-void test_vlseg2e32ff_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t *new_vl, size_t vl) {
- return vlseg2e32ff_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_f64m1_m(
-// [CHECK-RV32 body: @llvm.riscv.vlseg2ff.mask.nxv1f64.i32 call; segment results stored to V0-V1, returned vl to NEW_VL]
-//
-// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m1_m(
-// [CHECK-RV64 body: @llvm.riscv.vlseg2ff.mask.nxv1f64.i64 call; segment results stored to V0-V1, returned vl to NEW_VL]
-//
-void test_vlseg2e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_f64m1_m(
-// [CHECK-RV32 body: @llvm.riscv.vlseg3ff.mask.nxv1f64.i32 call; segment results stored to V0-V2, returned vl to NEW_VL]
-//
-// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m1_m(
-// [CHECK-RV64 body: @llvm.riscv.vlseg3ff.mask.nxv1f64.i64 call; segment results stored to V0-V2, returned vl to NEW_VL]
-//
-void test_vlseg3e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t *new_vl, size_t vl) {
- return vlseg3e64ff_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
-}
-
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } @llvm.riscv.vlseg4ff.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t *new_vl, size_t vl) {
- return vlseg4e64ff_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg5e64ff_v_f64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } @llvm.riscv.vlseg5ff.mask.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } @llvm.riscv.vlseg5ff.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg5e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t *new_vl, size_t vl) {
- return vlseg5e64ff_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e64ff_v_f64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } @llvm.riscv.vlseg6ff.mask.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], <vscale x 1 x double> [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } @llvm.riscv.vlseg6ff.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], <vscale x 1 x double> [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg6e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t *new_vl, size_t vl) {
- return vlseg6e64ff_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e64ff_v_f64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } @llvm.riscv.vlseg7ff.mask.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], <vscale x 1 x double> [[MASKEDOFF5:%.*]], <vscale x 1 x double> [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP7]], <vscale x 1 x double>* [[V6:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } @llvm.riscv.vlseg7ff.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], <vscale x 1 x double> [[MASKEDOFF5:%.*]], <vscale x 1 x double> [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP7]], <vscale x 1 x double>* [[V6:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg7e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t *new_vl, size_t vl) {
- return vlseg7e64ff_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e64ff_v_f64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } @llvm.riscv.vlseg8ff.mask.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], <vscale x 1 x double> [[MASKEDOFF5:%.*]], <vscale x 1 x double> [[MASKEDOFF6:%.*]], <vscale x 1 x double> [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP7]], <vscale x 1 x double>* [[V6:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP8]], <vscale x 1 x double>* [[V7:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } [[TMP0]], 8
-// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } @llvm.riscv.vlseg8ff.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], <vscale x 1 x double> [[MASKEDOFF5:%.*]], <vscale x 1 x double> [[MASKEDOFF6:%.*]], <vscale x 1 x double> [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP7]], <vscale x 1 x double>* [[V6:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP8]], <vscale x 1 x double>* [[V7:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 8
-// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg8e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t *new_vl, size_t vl) {
- return vlseg8e64ff_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_f64m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, i32 } @llvm.riscv.vlseg2ff.mask.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, i64 } @llvm.riscv.vlseg2ff.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e64ff_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_f64m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i32 } @llvm.riscv.vlseg3ff.mask.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], <vscale x 2 x double> [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } @llvm.riscv.vlseg3ff.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], <vscale x 2 x double> [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg3e64ff_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t *new_vl, size_t vl) {
- return vlseg3e64ff_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_f64m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i32 } @llvm.riscv.vlseg4ff.mask.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], <vscale x 2 x double> [[MASKEDOFF2:%.*]], <vscale x 2 x double> [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 2 x double> [[TMP4]], <vscale x 2 x double>* [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } @llvm.riscv.vlseg4ff.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], <vscale x 2 x double> [[MASKEDOFF2:%.*]], <vscale x 2 x double> [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP4]], <vscale x 2 x double>* [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg4e64ff_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t *new_vl, size_t vl) {
- return vlseg4e64ff_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_f64m4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double>, i32 } @llvm.riscv.vlseg2ff.mask.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF0:%.*]], <vscale x 4 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 4 x double> [[TMP1]], <vscale x 4 x double>* [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 4 x double> [[TMP2]], <vscale x 4 x double>* [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double>, i64 } @llvm.riscv.vlseg2ff.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF0:%.*]], <vscale x 4 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP1]], <vscale x 4 x double>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP2]], <vscale x 4 x double>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlseg2e64ff_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t *new_vl, size_t vl) {
- return vlseg2e64ff_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_f16mf4(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, i32 } @llvm.riscv.vlseg2ff.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, i64 } @llvm.riscv.vlseg2ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>,
i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16mf4(v0, v1, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_f16mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_f16mf4(v0, v1, v2, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_f16mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } 
[[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_f16mf4(v0, v1, v2, v3, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_f16mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_f16mf4(v0, v1, v2, v3, v4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_f16mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_f16mf4(v0, v1, v2, v3, v4, v5, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_f16mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* 
[[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_f16mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_f16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16mf2(v0, v1, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_f16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], 
align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_f16mf2(v0, v1, v2, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_f16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_f16mf2(v0, v1, v2, v3, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_f16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } 
@llvm.riscv.vlseg5ff.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_f16mf2(v0, v1, v2, v3, v4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_f16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: 
@test_vlseg6e16ff_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_f16mf2(v0, v1, v2, v3, v4, v5, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_f16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_f16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { 
, , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_f16m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg2e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16m1(v0, v1, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_f16m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg3e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_f16m1(v0, v1, v2, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_f16m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg4e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_f16m1(v0, v1, v2, v3, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_f16m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// 
CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg5e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_f16m1(v0, v1, v2, v3, v4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_f16m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , 
, , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg6e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_f16m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_f16m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg7e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, 
vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_f16m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlseg8e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, size_t *new_vl, size_t vl) { - return 
vlseg8e16ff_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_f16m2(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, i32 } @llvm.riscv.vlseg2ff.nxv8f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, i64 } @llvm.riscv.vlseg2ff.nxv8f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg2e16ff_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, size_t *new_vl, size_t vl) {
-  return vlseg2e16ff_v_f16m2(v0, v1, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_f16m2(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32 } @llvm.riscv.vlseg3ff.nxv8f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } @llvm.riscv.vlseg3ff.nxv8f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg3e16ff_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, size_t *new_vl, size_t vl) {
-  return vlseg3e16ff_v_f16m2(v0, v1, v2, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_f16m2(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32 } @llvm.riscv.vlseg4ff.nxv8f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } @llvm.riscv.vlseg4ff.nxv8f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg4e16ff_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, size_t *new_vl, size_t vl) {
-  return vlseg4e16ff_v_f16m2(v0, v1, v2, v3, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_f16m4(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half>, i32 } @llvm.riscv.vlseg2ff.nxv16f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 16 x half> [[TMP2]], <vscale x 16 x half>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half>, i64 } @llvm.riscv.vlseg2ff.nxv16f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half>, i64 } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half>, i64 } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP2]], <vscale x 16 x half>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half>, i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlseg2e16ff_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, size_t *new_vl, size_t vl) {
-  return vlseg2e16ff_v_f16m4(v0, v1, base, new_vl, vl);
+void test_vlseg2e64ff_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t *new_vl, size_t vl) {
+  return vlseg2e64ff_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, 
vl); } // CHECK-RV32-LABEL: @test_vlseg2e16ff_v_f16mf4_m( diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg.c @@ -7,405 +7,6 @@ #include -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_i8mf8(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_i8mf8(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_i8mf8(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 
1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_i8mf8(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_i8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8>* [[V7:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg8ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg8ei8_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg2.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg2ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vluxseg2ei8_v_i8mf4(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg3.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg3ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vluxseg3ei8_v_i8mf4(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg4.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], 
align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_i8mf4(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_i8mf4(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_i8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , 
, , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_i8mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_i8mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_i8mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_i8mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, 
vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_i8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ 
-597,405 +198,6 @@ return vluxseg2ei8_v_i8m4(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_i8mf8(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_i8mf8(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_i8mf8(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_i8mf8 (vint8mf8_t *v0, 
vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_i8mf8(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_i8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_i8mf4(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_i8mf4(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint16mf2_t 
bindex, size_t vl) { - return vluxseg4ei16_v_i8mf4(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_i8mf4(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_i8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// 
CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_i8mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, 
vint8mf2_t *v2, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_i8mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_i8mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_i8mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return 
vluxseg6ei16_v_i8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg7ei16_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg8ei16_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -1187,470 +389,71 @@
 return vluxseg2ei16_v_i8m4(v0, v1, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_v_i8mf8(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_v_i8mf8(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_v_i8mf8(v0, v1, v2, v3, base, bindex, vl);
+void test_vluxseg2ei32_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, vuint32m4_t bindex, size_t vl) {
+ return vluxseg2ei32_v_i8m1(v0, v1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg5ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_v_i8mf8(v0, v1, v2, v3, v4, base, bindex, vl);
+void test_vluxseg3ei32_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, vuint32m4_t bindex, size_t vl) {
+ return vluxseg3ei32_v_i8m1(v0, v1, v2, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg6ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_v_i8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+void test_vluxseg4ei32_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, vuint32m4_t bindex, size_t vl) {
+ return vluxseg4ei32_v_i8m1(v0, v1, v2, v3, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg7ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+void test_vluxseg5ei32_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, vuint32m4_t bindex, size_t vl) {
+ return vluxseg5ei32_v_i8m1(v0, v1, v2, v3, v4, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , }
[[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_i8mf4(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_i8mf4(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_i8mf4(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_i8mf4(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_i8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t 
*v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_i8mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_i8mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i32.i64(i8* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_i8mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_i8mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_i8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] 
= extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_i8m1(v0, v1, base, 
bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_i8m1(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_i8m1(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg5ei32_v_i8m1(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8m1( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 @@ -1764,405 +567,6 @@ return vluxseg4ei32_v_i8m2(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vluxseg2.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_i8mf8(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_i8mf8(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_i8mf8(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_i8mf8(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// 
CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_i8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store 
[[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_i8mf4(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_i8mf4(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_i8mf4(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , 
, } @llvm.riscv.vluxseg5.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_i8mf4(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_i8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret 
void -// -void test_vluxseg7ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_i8mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_i8mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf2( -// CHECK-RV64-NEXT: entry: -// 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64_v_i8mf2(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg5ei64_v_i8mf2(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg6ei64_v_i8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg7ei64_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg8ei64_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -2296,301 +700,35 @@
 return vluxseg8ei64_v_i8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_v_i16mf4(v0, v1, base, bindex, vl);
+void test_vluxseg2ei8_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxseg2ei8_v_i16m1(v0, v1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_v_i16mf4(v0, v1, v2, base, bindex, vl);
+void test_vluxseg3ei8_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxseg3ei8_v_i16m1(v0, v1, v2, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, 
const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_i16mf4(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_i16mf4(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_i16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] 
= extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_i16mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void 
-// -void test_vluxseg3ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_i16mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_i16mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_i16mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, 
vint16mf2_t *v5, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_i16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei8_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_v_i16m1(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei8_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8_v_i16m1(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m1(
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
@@ -2753,272 +891,6 @@
 return vluxseg2ei8_v_i16m4(v0, v1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_v_i16mf4(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_v_i16mf4(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-//
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_i16mf4(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_i16mf4(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_i16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_i16mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: 
@test_vluxseg3ei16_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_i16mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_i16mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_i16mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_i16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 
7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -3210,272 +1082,6 @@
 return vluxseg2ei16_v_i16m4(v0, v1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_v_i16mf4(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_v_i16mf4(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_v_i16mf4(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-//
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_i16mf4(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_i16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, 
vint16mf4_t *v6, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_i16mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_i16mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i32.i64(i16* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_i16mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_i16mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_i16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) 
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -3667,318 +1273,52 @@
 return vluxseg2ei32_v_i16m4(v0, v1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_v_i16mf4(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg3.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg2.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_v_i16mf4(v0, v1, v2, base, bindex, vl);
+void test_vluxseg2ei64_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxseg2ei64_v_i16m1(v0, v1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg4.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg3.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_v_i16mf4(v0, v1, v2, v3, base, bindex, vl);
+void test_vluxseg3ei64_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxseg3ei64_v_i16m1(v0, v1, v2, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg5.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg4.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg5ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_v_i16mf4(v0, v1, v2, v3, v4, base, bindex, vl);
+void test_vluxseg4ei64_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxseg4ei64_v_i16m1(v0, v1, v2, v3, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg6.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_v_i16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf4(
-//
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_i16mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_i16mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_i16mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_i16mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_i16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , 
, , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_i16m1(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_i16m1(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_i16m1(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16m1( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } 
[[TMP0]], 0 @@ -4111,139 +1451,6 @@ return vluxseg4ei64_v_i16m2(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_i32mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_i32mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_i32mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void 
test_vluxseg5ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_i32mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_i32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -4435,139 +1642,6 @@ return vluxseg2ei8_v_i32m4(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_i32mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_i32mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , 
, } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_i32mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_i32mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_i32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -4759,139 +1833,6 @@ return vluxseg2ei16_v_i32m4(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return 
vluxseg2ei32_v_i32mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_i32mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_i32mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_i32mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_i32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], 
align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -5083,139 +2024,6 @@ return vluxseg2ei32_v_i32m4(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_i32mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_i32mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_i32mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_i32mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_i32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, 
vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg7ei64_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg8.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP8]], <vscale x 1 x i32>* [[V7:%.*]], align 4
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg8ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg8ei64_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg2.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -6171,448 +2979,49 @@
   return vluxseg2ei64_v_i64m4(v0, v1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg2.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg2.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg2ei8_v_u8mf8(v0, v1, base, bindex, vl);
+void test_vluxseg2ei8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
+  return vluxseg2ei8_v_u8m1(v0, v1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg3.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg3.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg3ei8_v_u8mf8(v0, v1, v2, base, bindex, vl);
+void test_vluxseg3ei8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
+  return vluxseg3ei8_v_u8m1(v0, v1, v2, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg4.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg4.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg4ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg4ei8_v_u8mf8(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg5.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg5ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg5ei8_v_u8mf8(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg6.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg6ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg6ei8_v_u8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg7.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg7ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint8mf8_t bindex, 
size_t vl) { - return vluxseg7ei8_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_u8mf4(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_u8mf4(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } 
[[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_u8mf4(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_u8mf4(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_u8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], 
align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_u8mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf2( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_u8mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_u8mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_u8mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 
1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_u8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret 
void -// -void test_vluxseg8ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_u8m1(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_u8m1(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_u8m1(v0, v1, v2, v3, base, bindex, vl); +void test_vluxseg4ei8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg4ei8_v_u8m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8m1( @@ -6761,405 +3170,6 @@ return vluxseg2ei8_v_u8m4(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_u8mf8(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_u8mf8(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_u8mf8(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_u8mf8(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , 
, , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_u8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// 
CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_u8mf4(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_u8mf4(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_u8mf4(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_u8mf4(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_u8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) 
{ - return vluxseg7ei16_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_u8mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_u8mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , 
, , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_u8mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_u8mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_u8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], 
* [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -7237,517 +3247,118 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t 
*v5, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg6ei16_v_u8m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg7ei16_v_u8m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg8ei16_v_u8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i8.nxv16i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
<vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg2ei16_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return vluxseg2ei16_v_u8m2(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vluxseg3.nxv16i8.nxv16i16.i64(i8* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg3ei16_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return vluxseg3ei16_v_u8m2(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vluxseg4.nxv16i8.nxv16i16.i64(i8* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg4ei16_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return vluxseg4ei16_v_u8m2(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vluxseg2.nxv32i8.nxv32i16.i64(i8* [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg2ei16_v_u8m4 (vuint8m4_t *v0, vuint8m4_t *v1, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
-  return vluxseg2ei16_v_u8m4(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg2.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg2ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return 
vluxseg2ei32_v_u8mf8(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_u8mf8(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_u8mf8(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_u8mf8(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_u8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: 
[[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_u8mf4(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_u8mf4(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_u8mf4(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_u8mf4(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_u8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); 
+void test_vluxseg6ei16_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg6ei16_v_u8m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf4( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vluxseg7ei16_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg7ei16_v_u8m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf2( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vluxseg2.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_u8mf2(v0, v1, base, bindex, vl); +void test_vluxseg8ei16_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg8ei16_v_u8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_u8mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf2( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i8.nxv16i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_u8mf2(v0, v1, v2, v3, base, bindex, vl); +void test_vluxseg2ei16_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, vuint16m4_t bindex, size_t vl) { + return vluxseg2ei16_v_u8m2(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf2( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv16i8.nxv16i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_u8mf2(v0, v1, v2, v3, v4, base, bindex, vl); +void test_vluxseg3ei16_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, vuint16m4_t bindex, size_t vl) { + return vluxseg3ei16_v_u8m2(v0, v1, v2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf2( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv16i8.nxv16i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_u8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vluxseg4ei16_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, vuint16m4_t bindex, size_t vl) { + return vluxseg4ei16_v_u8m2(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf2( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv32i8.nxv32i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, 
bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vluxseg2ei16_v_u8m4 (vuint8m4_t *v0, vuint8m4_t *v1, const uint8_t *base, vuint16m8_t bindex, size_t vl) { + return vluxseg2ei16_v_u8m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m1( @@ -7928,405 +3539,6 @@ return vluxseg4ei32_v_u8m2(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_u8mf8(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_u8mf8(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: 
@test_vluxseg4ei64_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_u8mf8(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_u8mf8(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_u8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], 
align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_u8mf4(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_u8mf4(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_u8mf4(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_u8mf4(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_u8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 
1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_u8mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_u8mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_u8mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , 
} [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_u8mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_u8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, 
base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -8450,280 +3662,14 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg8ei64_v_u8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_u16mf4(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vluxseg3.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_u16mf4(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_u16mf4(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_u16mf4(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_v_u16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_v_u16mf2(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_v_u16mf2(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_v_u16mf2(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8_v_u16mf2(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8_v_u16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg7ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+void test_vluxseg8ei64_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
+ return vluxseg8ei64_v_u8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -8917,272 +3863,6 @@
 return vluxseg2ei8_v_u16m4(v0, v1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_v_u16mf4(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_v_u16mf4(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_v_u16mf4(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_v_u16mf4(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_v_u16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_v_u16mf2(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_v_u16mf2(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_v_u16mf2(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16_v_u16mf2(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16_v_u16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -9374,272 +4054,6 @@
 return vluxseg2ei16_v_u16m4(v0, v1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_v_u16mf4(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_v_u16mf4(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_v_u16mf4(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_v_u16mf4(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_v_u16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_v_u16mf2(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_v_u16mf2(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_v_u16mf2(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32_v_u16mf2(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32_v_u16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -9831,272 +4245,6 @@
 return vluxseg2ei32_v_u16m4(v0, v1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_v_u16mf4(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_v_u16mf4(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_v_u16mf4(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_v_u16mf4(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_v_u16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
[[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_u16mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_u16mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: 
store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_u16mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_u16mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_u16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -10251,161 +4399,28 @@ // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_u16m2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i16.nxv8i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei64_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg4ei64_v_u16m2(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg2.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_v_u32mf2(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg3.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_v_u32mf2(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg4.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_v_u32mf2(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg5.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:
[[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_u32mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_u32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vluxseg3ei64_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg3ei64_v_u16m2(v0, v1, v2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32mf2( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// 
CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i16.nxv8i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vluxseg4ei64_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg4ei64_v_u16m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m1( @@ -10599,139 +4614,6 @@ return vluxseg2ei8_v_u32m4(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 
4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_u32mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_u32mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_u32mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_u32mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vluxseg6.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_u32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , 
, , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -10923,139 +4805,6 @@ return vluxseg2ei16_v_u32m4(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_u32mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_u32mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t 
*v2, vuint32mf2_t *v3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_u32mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_u32mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_u32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * 
[[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -11247,139 +4996,6 @@ return vluxseg2ei32_v_u32m4(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_u32mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_u32mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_u32mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_u32mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_u32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t 
*v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -12335,139 +5951,6 @@ return vluxseg2ei64_v_u64m4(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_f32mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_f32mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_f32mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// 
CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -12659,139 +6142,6 @@ return vluxseg2ei8_v_f32m4(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_f32mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_f32mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: 
@test_vluxseg4ei16_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_f32mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: 
@test_vluxseg7ei16_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -12983,139 +6333,6 @@ return vluxseg2ei16_v_f32m4(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32mf2( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_f32mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_f32mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_f32mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, 
const float *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxseg5ei32_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg6.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg6ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxseg6ei32_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg7.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP7]], <vscale x 1 x float>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg7ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxseg7ei32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg8.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP7]], <vscale x 1 x float>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP8]], <vscale x 1 x float>* [[V7:%.*]], align 4
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg8ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxseg8ei32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg2.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -13307,139 +6524,6 @@
   return vluxseg2ei32_v_f32m4(v0, v1, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg2.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg2ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg2ei64_v_f32mf2(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg3.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg3ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg3ei64_v_f32mf2(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg4.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg4ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg4ei64_v_f32mf2(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg5.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg5ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg5ei64_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg6.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg6ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg6ei64_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg7.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP7]], <vscale x 1 x float>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg7ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg7ei64_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg8.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP7]], <vscale x 1 x float>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP8]], <vscale x 1 x float>* [[V7:%.*]], align 4
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg8ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg8ei64_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg2.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg_mask.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg_mask.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg_mask.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg_mask.c
@@ -7,405 +7,6 @@
 #include <riscv_vector.h>
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:
[[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * 
[[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, 
vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void 
test_vluxseg5ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_i8mf4_m 
(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: 
@test_vluxseg3ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t 
maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t 
maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -597,405 +198,6 @@ return vluxseg2ei8_v_i8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const 
int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_i8mf8_m 
(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_i8mf8_m (vint8mf8_t *v0, 
vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: 
@test_vluxseg3ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, 
vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t 
maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_i8mf2_m(v0, v1, v2, 
v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1187,470 +389,71 @@ return vluxseg2ei16_v_i8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: 
@test_vluxseg3ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg2ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg2ei32_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg3ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg3ei32_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg4ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg4ei32_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg5ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg5ei32_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , 
} [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: 
store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: 
store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t 
*base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t 
*v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg5ei32_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
@@ -1764,405 +567,6 @@
 return vluxseg4ei32_v_i8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg5ei64_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg6ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg7ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg8ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2296,272 +700,6 @@
 return vluxseg8ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2746,277 +884,11 @@
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei8_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg2ei8_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg2ei16_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg7ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg2ei8_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint8m2_t bindex, size_t vl) {
+ return vluxseg2ei8_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
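The added test above forwards to the explicitly-suffixed masked form of the 2-field indexed segment load. As orientation for readers of this patch, here is a minimal, hedged usage sketch of the same intrinsic; the wrapper name and comments are illustrative assumptions, while the intrinsic signature is the one exercised in this file:

#include <riscv_vector.h>
#include <stddef.h>

/* Illustrative wrapper (name is an assumption): for each active lane i,
   load the two consecutive int16_t fields located bindex[i] bytes past
   base into *x0/*x1; inactive lanes take their values from off0/off1. */
static void demo_vluxseg2ei8_i16m4_m(vint16m4_t *x0, vint16m4_t *x1,
                                     vbool4_t mask,
                                     vint16m4_t off0, vint16m4_t off1,
                                     const int16_t *base,
                                     vuint8m2_t bindex, size_t vl) {
  vluxseg2ei8_v_i16m4_m(x0, x1, mask, off0, off1, base, bindex, vl);
}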

// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m1_m(
@@ -3210,272 +1082,6 @@
return vluxseg2ei16_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei32_v_i16mf4_m (vint16mf4_t *v0,
vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t 
maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , 
} [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -3667,272 +1273,6 @@ return vluxseg2ei32_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], 
align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
{ , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
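The removed ei64 blocks all follow the same unpack-and-store shape checked above. As a hedged sketch of driving the widest of them, with illustrative caller and operand names (only the intrinsic signature is taken from the removed test):

#include <riscv_vector.h>
#include <stddef.h>

/* Illustrative caller (names are assumptions): for each active lane i,
   gather eight consecutive int16_t fields starting at byte offset idx[i]
   past base into *v0..*v7; inactive lanes are taken from the m0..m7
   merge operands, which is what the CHECK lines verify. */
static void demo_vluxseg8ei64_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1,
                                       vint16mf2_t *v2, vint16mf2_t *v3,
                                       vint16mf2_t *v4, vint16mf2_t *v5,
                                       vint16mf2_t *v6, vint16mf2_t *v7,
                                       vbool32_t mask,
                                       vint16mf2_t m0, vint16mf2_t m1,
                                       vint16mf2_t m2, vint16mf2_t m3,
                                       vint16mf2_t m4, vint16mf2_t m5,
                                       vint16mf2_t m6, vint16mf2_t m7,
                                       const int16_t *base,
                                       vuint64m2_t idx, size_t vl) {
  vluxseg8ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask,
                          m0, m1, m2, m3, m4, m5, m6, m7,
                          base, idx, vl);
}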
// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -4111,139 +1451,6 @@
return vluxseg4ei64_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]],
i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// 
CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -4435,139 +1642,6 @@
 return vluxseg2ei8_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -4738,158 +1812,25 @@
 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei16_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei16_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg7ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg4ei16_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxseg4ei16_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32mf2_m(
+-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+//
+void test_vluxseg2ei16_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl) {
+ return vluxseg2ei16_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -5083,139 +2024,6 @@
 return vluxseg2ei32_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -6171,405 +2979,6 @@
 return vluxseg2ei64_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg8ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg5ei8_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf2_m(
-// CHECK-RV64-NEXT:
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], <vscale x 4 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8>* [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg8ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -6739,425 +3148,26 @@
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
 // CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg4ei8_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei8_v_u8m4_m (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
- return vluxseg2ei8_v_u8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0,
vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, 
vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { 
- return vluxseg4ei16_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t 
maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, 
bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg5ei16_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8>* [[V3:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg6ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg6ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg4ei8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
+ return vluxseg4ei8_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg7ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg7ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], <vscale x 4 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8>* [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg8ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg2ei8_v_u8m4_m (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
+ return vluxseg2ei8_v_u8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m1_m(
@@ -7351,405 +3361,6 @@
 return vluxseg2ei16_v_u8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl)
{ - return vluxseg3ei32_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], 
align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// 
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg4ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return vluxseg4ei32_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg5ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return vluxseg5ei32_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg6ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return vluxseg6ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg7ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return vluxseg7ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg8ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return vluxseg8ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg2ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return vluxseg2ei32_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg3ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return vluxseg3ei32_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg4ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return vluxseg4ei32_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg5ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return vluxseg5ei32_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg6ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return vluxseg6ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg7ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return vluxseg7ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg8ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return vluxseg8ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -7866,465 +3477,66 @@
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg8ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return vluxseg8ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg2ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
-  return vluxseg2ei32_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg3ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
-  return vluxseg3ei32_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg4ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
-  return vluxseg4ei32_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg2ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg2ei64_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg3ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg3ei64_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg4ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg4ei64_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg5ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg5ei64_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg6ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg6ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg7ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg7ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg8ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg8ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg2ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg2ei64_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg3ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg3ei64_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg4ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg4ei64_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg5ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg5ei64_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg6ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg6ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg7ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg7ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg8ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg8ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg2ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg2ei64_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg3ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg3ei64_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg4ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg4ei64_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg8ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
+  return vluxseg8ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg5ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg5ei64_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg2ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
+  return vluxseg2ei32_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg6ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg6ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg3ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
+  return vluxseg3ei32_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg7ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg7ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg8ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg8ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg4ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
+  return vluxseg4ei32_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8m1_m(
@@ -8460,272 +3672,6 @@
   return vluxseg8ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg2ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg2ei8_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg3ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg3ei8_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg4ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg4ei8_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg5ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg5ei8_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg6ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg6ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], 
* [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: 
@test_vluxseg2ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m1_m( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -8917,272 +3863,6 @@ return vluxseg2ei8_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, 
bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t 
maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, 
vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t 
maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg8ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -9363,281 +4043,15 @@
 // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16> } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF0:%.*]], <vscale x 16 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 16 x i16> [[TMP1]], <vscale x 16 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei16_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
-  return vluxseg2ei16_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxseg2ei32_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxseg3ei32_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
{ , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+//
+void test_vluxseg2ei16_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
+ return vluxseg2ei16_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m1_m(
@@ -9831,272 +4245,6 @@
 return vluxseg2ei32_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -10275,139 +4423,6 @@
 return vluxseg4ei64_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -10599,139 +4614,6 @@
 return vluxseg2ei8_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg6ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg7ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg8ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -10923,139 +4805,6 @@
 return vluxseg2ei16_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -11247,139 +4996,6 @@
 return vluxseg2ei32_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1,
vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -12318,154 +5934,21 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], 
float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg4ei64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t 
maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +// +void test_vluxseg2ei64_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m1_m( @@ -12659,139 +6142,6 @@ return vluxseg2ei8_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m1_m( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -12983,139 +6333,6 @@ return vluxseg2ei16_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, 
vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -13307,139 +6524,6 @@ return vluxseg2ei32_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t 
maskedoff5, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: 
store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -14395,272 +7479,6 @@ return vluxseg2ei64_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_f16mf4(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_f16mf4(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// 
CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_f16mf4(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_f16mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_f16mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_f16mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue 
{ , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return 
vluxseg8ei8_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -14839,283 +7657,17 @@
   return vluxseg4ei8_v_f16m2(v0, v1, v2, v3, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16f16.nxv16i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei8_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint8m2_t bindex, size_t vl) {
-  return vluxseg2ei8_v_f16m4(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg2ei16_v_f16mf4(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg3ei16_v_f16mf4(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint16mf4_t bindex, size_t
vl) { - return vluxseg4ei16_v_f16mf4(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_f16mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_f16mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_f16mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void 
test_vluxseg6ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg6ei16_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg7ei16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16f16.nxv16i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg8ei16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+//
+void test_vluxseg2ei8_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint8m2_t bindex, size_t vl) {
+  return vluxseg2ei8_v_f16m4(v0, v1, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -15309,272 +7861,6 @@
   return vluxseg2ei16_v_f16m4(v0, v1, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxseg2ei32_v_f16mf4(v0, v1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxseg3ei32_v_f16mf4(v0, v1, v2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxseg4ei32_v_f16mf4(v0, v1, v2, v3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, 
vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_f16mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_f16mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { , , , } @llvm.riscv.vluxseg4.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_f16mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , 
, , } @llvm.riscv.vluxseg7.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
-  return vluxseg7ei32_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
-  return vluxseg8ei32_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
@@ -15766,272 +8052,6 @@
   return vluxseg2ei32_v_f16m4(v0, v1, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , }
@llvm.riscv.vluxseg2.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_f16mf4(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_f16mf4(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_f16mf4(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return 
vluxseg5ei64_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_f16mf2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_f16mf2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint64m2_t 
bindex, size_t vl) { - return vluxseg4ei64_v_f16mf2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -16133,347 +8153,81 @@ // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , 
, , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_f16m2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_f16m2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// 
CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_f16m2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf4_m( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, 
vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg8ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg2ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
-  return vluxseg2ei8_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg7ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg7ei64_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
}

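As a reading aid for the masked forms being deleted in this hunk: they take one maskedoff merge operand per segment field, and lanes with a clear mask bit keep that operand's value rather than loading (the trailing i64 0 in the IR above is a policy operand). A minimal hypothetical wrapper, with an invented name but exactly the signature shown in these tests:

#include <riscv_vector.h>

/* Hypothetical wrapper around the masked two-field load deleted above:
   lanes where mask is 0 receive the matching maskedoff field instead of
   being loaded from memory. */
void gather_pairs_masked(vfloat16mf2_t *x, vfloat16mf2_t *y, vbool32_t mask,
                         vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1,
                         const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
  vluxseg2ei8_v_f16mf2_m(x, y, mask, maskedoff0, maskedoff1, base, bindex, vl);
}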
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
-  return vluxseg3ei8_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg8.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP8]], <vscale x 4 x half>* [[V7:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg4ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
-  return vluxseg4ei8_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg8ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg8ei64_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg2.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg5ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3,
vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg2ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vluxseg2ei64_v_f16m2(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg3ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vluxseg3ei64_v_f16m2(v0, v1, v2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
, , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], 
* [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg4ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vluxseg4ei64_v_f16m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m1_m( @@ -16667,272 +8421,6 @@ return vluxseg2ei8_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { 
, , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], 
* [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -17124,272 +8612,6 @@ return vluxseg2ei16_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t 
maskedoff2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, 
vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], 
* [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], 
[[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -17581,272 +8803,6 @@ return vluxseg2ei32_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: 
@test_vluxseg6ei64_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return 
vluxseg7ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const 
_Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg_mask.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg_mask_mf.c copy from clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg_mask.c copy to clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg_mask_mf.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg_mask.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg_mask_mf.c @@ -7,19 +7,6 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -//
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -406,197 +393,6 @@ return vluxseg8ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg5ei8_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg6ei8_v_i8m1_m(v0, v1, v2, 
v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg7ei8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// 
CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg8ei8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_i8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg3ei8_v_i8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, 
vint8m2_t maskedoff3, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg4ei8_v_i8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_i8m4_m (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return vluxseg2ei8_v_i8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -996,197 +792,6 @@ return vluxseg8ei16_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg4ei16_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg5ei16_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg6ei16_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg7ei16_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg8ei16_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei16_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg2ei16_v_i8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei16_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg3ei16_v_i8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei16_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg4ei16_v_i8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei16_v_i8m4_m (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint16m8_t bindex, size_t vl) {
- return vluxseg2ei16_v_i8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -1586,184 +1191,6 @@
 return vluxseg8ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg3ei32_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg4ei32_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg5ei32_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg6ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg7ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg8ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei32_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg2ei32_v_i8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei32_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg3ei32_v_i8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei32_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg4ei32_v_i8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2163,139 +1590,6 @@
 return vluxseg8ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg3ei64_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg4ei64_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg5ei64_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg6ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg7ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg8ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2562,262 +1856,71 @@
 return vluxseg8ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg2ei8_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg2ei16_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg3ei8_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg3ei16_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg4ei8_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg4ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg4ei16_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg5ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg5ei8_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg5ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg5ei16_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg6ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg7ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxseg8ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei8_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg2ei8_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei8_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg3ei8_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei8_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxseg4ei8_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei8_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg2ei8_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg4ei16_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg5ei16_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf4_m(
+-// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf4_m(
 +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
@@ -3019,197 +2122,6 @@
 return vluxseg8ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg5ei16_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg6ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg7ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , }
[[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -3476,197 +2388,6 @@ return vluxseg8ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16m1_m( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -3933,184 +2654,6 @@ return vluxseg8ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] 
= extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void 
-// -void test_vluxseg2ei64_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -4244,197 +2787,6 @@ return vluxseg8ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], 
* [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, 
vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -4568,197 +2920,6 @@ return vluxseg8ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32m1_m( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_i32m1_m(v0, v1, v2, 
v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -4892,197 +3053,6 @@ return vluxseg8ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } 
[[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, 
vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store 
[[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], 
* [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -5216,1009 +3186,453 @@ return vluxseg8ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t 
*v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg3ei64_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg4ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg4ei64_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg2ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg2ei8_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]],
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg3ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, 
const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg4ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg5ei8_v_u8mf8_m (vuint8mf8_t *v0, 
vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_i32m1_m(v0, v1, 
v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg6ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg7ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg8ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, 
const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg5ei8_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i64m1_m( +// CHECK-RV64-LABEL: 
@test_vluxseg6ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg6ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg7ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: 
@test_vluxseg8ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg8ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg3ei8_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg4ei8_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m4_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg5ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg5ei8_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg6ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg6ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg7ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, 
vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg7ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg8ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg8ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg2ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg3ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg4ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void 
test_vluxseg2ei16_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , 
, } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, 
vint64m1_t maskedoff3, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, 
vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg8ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg2ei64_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg3ei64_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], <vscale x 2 x i64> [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64>* [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg4ei64_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2,
maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue 
{ , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6231,13 +3645,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6252,13 +3666,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, 
vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6275,13 +3689,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , 
, , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6300,26 +3714,26 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
@@ -6328,13 +3742,13 @@
 // CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vluxseg3ei8_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg3ei16_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
@@ -6345,13 +3759,13 @@
 // CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vluxseg4ei8_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg4ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg4ei16_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]],
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6364,13 +3778,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg5ei16_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6385,13 +3799,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg6ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6408,13 +3822,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} +void test_vluxseg7ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg7ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6433,26 +3847,26 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return 
vluxseg8ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg8ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6461,13 +3875,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t 
maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg3ei16_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6478,13 +3892,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg4ei16_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6497,13 +3911,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, 
vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg5ei16_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6518,13 +3932,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg6ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6541,13 +3955,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t 
maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg7ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6566,217 +3980,26 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: 
@test_vluxseg3ei8_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, 
vuint8m1_t maskedoff4, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return vluxseg5ei8_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg6ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return vluxseg6ei8_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg7ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return vluxseg7ei8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg8ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return vluxseg8ei8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg2ei8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
-  return vluxseg2ei8_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg3ei8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
-  return vluxseg3ei8_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg4ei8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
-  return vluxseg4ei8_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg2ei8_v_u8m4_m (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
-  return vluxseg2ei8_v_u8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg8ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg8ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg2ei16_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg2ei32_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -6785,13 +4008,13 @@
 // CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg3ei16_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg3ei32_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -6802,13 +4025,13 @@
 // CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg4ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg4ei16_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg4ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg4ei32_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -6821,13 +4044,13 @@
 // CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg5ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg5ei16_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg5ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg5ei32_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -6842,13 +4065,13 @@
 // CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg6ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg6ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg6ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg6ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -6865,13 +4088,13 @@
 // CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg7ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg7ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg7ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg7ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -6890,26 +4113,26 @@
 // CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg8ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg8ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg8ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg8ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg2ei16_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg2ei32_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -6918,13 +4141,13 @@
 // CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg3ei16_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg3ei32_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -6935,13 +4158,13 @@
 // CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg4ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg4ei16_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg4ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg4ei32_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -6954,13 +4177,13 @@
 // CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg5ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg5ei16_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg5ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg5ei32_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -6975,13 +4198,13 @@
 // CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg6ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg6ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg6ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg6ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -6998,13 +4221,13 @@
 // CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg7ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg7ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg7ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg7ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -7023,26 +4246,26 @@
 // CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg8ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg8ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg8ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg8ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return vluxseg2ei16_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+  return vluxseg2ei32_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -7051,13 +4274,13 @@
 // CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return vluxseg3ei16_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+  return vluxseg3ei32_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -7068,13 +4291,13 @@
 // CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg4ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return vluxseg4ei16_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg4ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+  return vluxseg4ei32_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -7087,13 +4310,13 @@
 // CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg5ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return vluxseg5ei16_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg5ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+  return vluxseg5ei32_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -7108,13 +4331,13 @@
 // CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg6ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return vluxseg6ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg6ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+  return vluxseg6ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -7131,13 +4354,13 @@
 // CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg7ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return vluxseg7ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg7ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+  return vluxseg7ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -7156,217 +4379,26 @@
 // CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg8ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return vluxseg8ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg2ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return vluxseg2ei16_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg3ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return vluxseg3ei16_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg4ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return vluxseg4ei16_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg5ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return vluxseg5ei16_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg6ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return vluxseg6ei16_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg7ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return vluxseg7ei16_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg8ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return vluxseg8ei16_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei16_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg2ei16_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei16_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg3ei16_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei16_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg4ei16_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei16_v_u8m4_m (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
- return vluxseg2ei16_v_u8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg8ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxseg8ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg2ei64_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -7375,13 +4407,13 @@
 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg3ei64_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -7392,13 +4424,13 @@
 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg4ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg4ei64_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -7411,13 +4443,13 @@
 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg5ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg5ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg5ei64_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -7432,13 +4464,13 @@
 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg6ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg6ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg6ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -7455,13 +4487,13 @@
 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg7ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg7ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg7ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -7480,26 +4512,26 @@
 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg8ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg8ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg8ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg2ei64_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -7508,13 +4540,13 @@
 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg3ei64_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -7525,13 +4557,13 @@
 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg4ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg4ei64_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -7544,13 +4576,13 @@
 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg5ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg5ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg5ei64_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -7565,13 +4597,13 @@
 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg6ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg6ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg6ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -7588,13 +4620,13 @@
 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg7ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg7ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg7ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -7613,26 +4645,26 @@
 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg8ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg8ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg8ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxseg2ei64_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -7641,13 +4673,13 @@
 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxseg3ei64_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -7658,13 +4690,13 @@
 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg4ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxseg4ei64_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -7677,13 +4709,13 @@
 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg5ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg5ei32_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg5ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxseg5ei64_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -7698,13 +4730,13 @@
 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg6ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg6ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg6ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxseg6ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -7721,13 +4753,13 @@
 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg7ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg7ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg7ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxseg7ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -7746,5583 +4778,1622 @@
 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg8ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg8ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
+void test_vluxseg8ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxseg8ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg2ei8_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg3ei32_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg3ei8_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg4ei32_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg4ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg4ei8_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg5ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg5ei32_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg5ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg5ei8_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg6ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg6ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg6ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg6ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg7ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg7ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg7ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg7ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg8ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg8ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg8ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg8ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg2ei32_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg2ei8_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg3ei32_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg3ei8_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg4ei32_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg4ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg4ei8_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg5ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg5ei8_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], *
[[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg6ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg6ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg7ei8_v_u16mf2_m (vuint16mf2_t *v0, 
vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg7ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg8ei8_v_u16mf2_m (vuint16mf2_t 
*v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg8ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * 
[[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_u8mf4_m (vuint8mf4_t *v0, 
vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store 
<vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg5ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg5ei64_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg6ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg6ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], <vscale x 2 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg7ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg7ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], <vscale x 2 x i8> [[MASKEDOFF6:%.*]], <vscale x 2 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8>* [[V7:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg8ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg8ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg2ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex,
size_t vl) { - return vluxseg2ei64_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t 
*v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t 
*v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: 
@test_vluxseg3ei64_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg3ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return vluxseg3ei64_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg4ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return vluxseg4ei64_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg5ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t
maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg5ei64_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg6ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t 
maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg7ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg8ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
<vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg3ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg3ei8_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg4ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg4ei8_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg5ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg5ei8_v_u16mf4_m(v0, v1, v2,
v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t 
maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, 
vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: 
@test_vluxseg6ei8_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_u16mf4_m(v0, 
v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, 
vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_u16mf4_m 
(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_u16mf2_m(v0, 
v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, 
vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_u16mf2_m 
(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_u16m1_m(v0, v1, mask, 
maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t 
*v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, 
vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m2_m( -// 
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg3ei16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return vluxseg3ei16_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], <vscale x 8 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg4ei16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return vluxseg4ei16_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16> } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF0:%.*]], <vscale x 16 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP1]], <vscale x 16 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg2ei16_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
-  return vluxseg2ei16_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg2ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxseg2ei32_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg3ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxseg3ei32_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg4ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxseg4ei32_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg5ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxseg5ei32_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg6ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxseg6ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], <vscale x 1 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-//
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], 
align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// 
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// 
CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: 
store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// 
CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: 
store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// 
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg2ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg2ei64_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg3ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg3ei64_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg4ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg4ei64_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-//
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } 
[[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , 
} [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], 
align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, 
vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], 
align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg6ei16_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg7ei16_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg8ei16_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei16_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg2ei16_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei16_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei16_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei16_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei32_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg8ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei64_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei64_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei64_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei64_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg4ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg5ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg6ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg7ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } 
[[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], 
align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_u64m1_m (vuint64m1_t *v0, 
vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], 
* [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg8ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg2ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg2ei16_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , ,
} [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg3ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg4ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] 
= extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg5ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m4_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: 
store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg6ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg7ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, 
vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg8ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg2ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg5ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg3ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxseg3ei16_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64>* [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg6ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg4ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxseg4ei16_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], <vscale x 1 x i64> [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64>* [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64>* [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64>* [[V6:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg7ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg5ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxseg5ei16_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], <vscale x 1 x i64> [[MASKEDOFF6:%.*]], <vscale x 1 x i64> [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64>* [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64>* [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64>* [[V6:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64>* [[V7:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg8ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg6ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxseg6ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg7ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxseg7ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf2_m(
 //
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg8ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg8ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg2ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m4_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg3ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32mf2_m( +// CHECK-RV64-LABEL: 
@test_vluxseg4ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg4ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg5ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg6ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, 
vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg7ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t 
maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg8ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, 
vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg2ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg3ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg4ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, 
maskedoff2, base, bindex, vl); +void test_vluxseg5ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg5ei32_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg6ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg6ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg7ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg7ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg8ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg8ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg2ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg3ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg4ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg5ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m2_m( +// CHECK-RV64-LABEL: 
@test_vluxseg6ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg6ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m4_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg7ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store 
[[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg8ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg2ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg3ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg4ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg6ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg5ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg5ei64_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_f32mf2_m 
(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg6ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg6ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg7ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg7ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg8ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg8ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg2ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg3ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg5ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg4ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, 
vl); +void test_vluxseg5ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, 
base, bindex, vl); +void test_vluxseg6ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, 
vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg7ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg8ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, 
vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg2ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei16_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg3ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg3ei16_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m4_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei16_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg4ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg4ei16_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg5ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg5ei16_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg6ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg6ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg7ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg7ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg5ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg8ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg8ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg6ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg2ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg2ei32_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg7ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg3ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg3ei32_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg8ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg4ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg4ei32_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg5ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg5ei32_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg6ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg6ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg7ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg7ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg5ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg8ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg8ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg6ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg2ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg2ei64_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg7ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg3ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg3ei64_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg8ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg4ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg4ei64_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg5ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg5ei64_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg6ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg6ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg7ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg7ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m4_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei32_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg8ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg8ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg2ei8_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -13331,13 +6402,13 @@
 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg3ei8_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -13348,13 +6419,13 @@
 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg4ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg4ei8_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -13367,13 +6438,13 @@
 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg5ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg5ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg5ei8_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -13388,13 +6459,13 @@
 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg6ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg6ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg6ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -13411,13 +6482,13 @@
 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg7ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg7ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg7ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -13436,963 +6507,407 @@
 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg8ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg8ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg2ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg2ei64_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vluxseg3ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg3ei64_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]],
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) 
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, 
vfloat64m1_t maskedoff2, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, 
vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint32mf2_t 
bindex, size_t vl) {
-  return vluxseg2ei32_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg8ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg8ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxseg3ei32_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg2ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg2ei16_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxseg4ei32_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg3ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg3ei16_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg5ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxseg5ei32_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg4ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg4ei16_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg6ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxseg6ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg5ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg5ei16_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg7ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxseg7ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t 
maskedoff7, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg7ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg8ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, 
vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg2ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg3ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m4_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg4ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg5ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// 
CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg6ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, 
vl); +void test_vluxseg7ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg8ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg2ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg3ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg4ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg5ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg6ei64_v_f32mf2_m (vfloat32mf2_t 
*v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg7ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64_v_f32mf2_m(v0, v1, v2, v3, 
v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m4_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg8ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf4( @@ -14661,197 +7176,6 @@ return vluxseg8ei8_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } 
[[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_f16m1(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_f16m1(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_f16m1(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , 
} @llvm.riscv.vluxseg6.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , 
} [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f16.nxv8i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_f16m2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8f16.nxv8i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_f16m2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8f16.nxv8i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_f16m2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16f16.nxv16i8.i64(half* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_f16m4(v0, v1, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -15118,197 +7442,6 @@ return vluxseg8ei16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_f16m1(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_f16m1(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_f16m1(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4f16.nxv4i16.i64(half* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void 
test_vluxseg7ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_f16m2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_f16m2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: 
@test_vluxseg4ei16_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_f16m2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16f16.nxv16i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16_v_f16m4(v0, v1, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -15575,197 +7708,6 @@ return vluxseg8ei32_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_f16m1(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_f16m1(v0, v1, v2, base, bindex, vl); -} - -// 
CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_f16m1(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: 
@test_vluxseg7ei32_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f16.nxv8i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_f16m2(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8f16.nxv8i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_f16m2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8f16.nxv8i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_f16m2(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16f16.nxv16i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_f16m4(v0, v1, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) @@ -15981,233 +7923,55 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vluxseg7.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// 
CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_f16m1(v0, v1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_f16m1(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_f16m1(v0, v1, v2, v3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// 
CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP8]], <vscale x 4 x half>* [[V7:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg8ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg8ei64_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg2.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg2ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
-  return vluxseg2ei64_v_f16m2(v0, v1, base, bindex, vl);
+  return vluxseg6ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m2(
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg3.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg7.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
-  return vluxseg3ei64_v_f16m2(v0, v1, v2, base, bindex, vl);
+void test_vluxseg7ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg7ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m2(
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg4.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg8.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP8]], <vscale x 2 x half>* [[V7:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg4ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
-  return vluxseg4ei64_v_f16m2(v0, v1, v2, v3, base, bindex, vl);
+void test_vluxseg8ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg8ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf4_m(
@@ -16476,197 +8240,6 @@
   return vluxseg8ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> }
[[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } 
[[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { 
, , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei8_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei8_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_f16m4_m (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - // 
CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -16933,197 +8506,6 @@ return vluxseg8ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, 
vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, 
vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint16m2_t 
bindex, size_t vl) { - return vluxseg4ei16_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_f16m4_m (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -17390,197 +8772,6 @@ return vluxseg8ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
{ , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: 
store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] 
= extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_f16m4_m (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -17847,180 +9038,16 @@ return vluxseg8ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i64.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - 
return vluxseg5ei64_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t 
maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei64_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i64.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei64_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
-  return vluxseg3ei64_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg2ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg2ei8_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i64.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], <vscale x 8 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vluxseg4ei64_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
-  return vluxseg4ei64_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg_mf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg_mf.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg_mf.c
@@ -0,0 +1,6925 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +zfh -target-feature +v \
+// RUN:   -disable-O0-optnone -emit-llvm %s \
+// RUN:   -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg2.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg2ei8_v_i8mf8(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg3.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg3ei8_v_i8mf8(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg4.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg4ei8_v_i8mf8(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg5.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base,
vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8_v_i8mf8(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8_v_i8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8_v_i8mf4(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8_v_i8mf4(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8_v_i8mf4(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: 
@test_vluxseg5ei8_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg5ei8_v_i8mf4(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg6ei8_v_i8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg7ei8_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg8ei8_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8_v_i8mf2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg3ei8_v_i8mf2(v0, v1, v2, base, bindex, vl); +} + 
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg4.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg4ei8_v_i8mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg5.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg5ei8_v_i8mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg6.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg6ei8_v_i8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg7.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg7ei8_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg8.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg8ei8_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg2.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg2ei16_v_i8mf8(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg3.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg3ei16_v_i8mf8(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg4.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg4ei16_v_i8mf8(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg5.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg5ei16_v_i8mf8(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg6.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    
[[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16_v_i8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , 
, } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16_v_i8mf4(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16_v_i8mf4(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16_v_i8mf4(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg5ei16_v_i8mf4(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg6ei16_v_i8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg7ei16_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf4( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg8ei16_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16_v_i8mf2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg3ei16_v_i8mf2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: 
store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg4ei16_v_i8mf2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg5ei16_v_i8mf2(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg6ei16_v_i8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg7ei16_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg8ei16_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32_v_i8mf8(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32_v_i8mf8(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32_v_i8mf8(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32_v_i8mf8(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32_v_i8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t 
*v7, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32_v_i8mf4(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32_v_i8mf4(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32_v_i8mf4(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// 
CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg5ei32_v_i8mf4(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg6ei32_v_i8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg7ei32_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg8ei32_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32_v_i8mf2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg3ei32_v_i8mf2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, 
vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg4ei32_v_i8mf2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg5ei32_v_i8mf2(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg6ei32_v_i8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg7ei32_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg8ei32_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64_v_i8mf8(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret 
void +// +void test_vluxseg3ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64_v_i8mf8(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64_v_i8mf8(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64_v_i8mf8(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const 
int8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64_v_i8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } 
[[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64_v_i8mf4(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64_v_i8mf4(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64_v_i8mf4(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg5ei64_v_i8mf4(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vluxseg6.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg6ei64_v_i8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg7ei64_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// 
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg8ei64_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64_v_i8mf2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg3ei64_v_i8mf2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg4ei64_v_i8mf2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg5ei64_v_i8mf2(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg6ei64_v_i8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t 
*v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg7ei64_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg8ei64_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8_v_i16mf4(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8_v_i16mf4(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8_v_i16mf4(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8_v_i16mf4(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8_v_i16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + 
return vluxseg2ei8_v_i16mf2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8_v_i16mf2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8_v_i16mf2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg5ei8_v_i16mf2(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg6ei8_v_i16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg7ei8_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg8ei8_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16_v_i16mf4(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16_v_i16mf4(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16_v_i16mf4(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } 
[[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16_v_i16mf4(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16_v_i16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16_v_i16mf2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16_v_i16mf2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16_v_i16mf2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg5ei16_v_i16mf2(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg6ei16_v_i16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg7ei16_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg8ei16_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32_v_i16mf4(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vluxseg3.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32_v_i16mf4(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32_v_i16mf4(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32_v_i16mf4(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg6ei32_v_i16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg7ei32_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg8ei32_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg2ei32_v_i16mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg3ei32_v_i16mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg4ei32_v_i16mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg5ei32_v_i16mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg6ei32_v_i16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg7ei32_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg8ei32_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg2ei64_v_i16mf4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg3ei64_v_i16mf4(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg4ei64_v_i16mf4(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg5ei64_v_i16mf4(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg6ei64_v_i16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg7ei64_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg8ei64_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg2ei64_v_i16mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg3ei64_v_i16mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg4ei64_v_i16mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg5ei64_v_i16mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg6ei64_v_i16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg7ei64_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg8ei64_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg2ei8_v_i32mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg3ei8_v_i32mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg4ei8_v_i32mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg5ei8_v_i32mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg6ei8_v_i32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg7ei8_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg8ei8_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg2ei16_v_i32mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg3ei16_v_i32mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg4ei16_v_i32mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg5ei16_v_i32mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg6ei16_v_i32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg7ei16_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg8ei16_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg2ei32_v_i32mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg3ei32_v_i32mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg4ei32_v_i32mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg5ei32_v_i32mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg6ei32_v_i32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg7ei32_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg8ei32_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg2ei64_v_i32mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg3ei64_v_i32mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg4ei64_v_i32mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg5ei64_v_i32mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg6ei64_v_i32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg7ei64_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg8ei64_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg2ei8_v_u8mf8(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg3ei8_v_u8mf8(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg4ei8_v_u8mf8(v0, v1, v2, v3, base, bindex, vl);
+}
+
CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8_v_u8mf8(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8_v_u8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = 
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg7ei8_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg8ei8_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg2ei8_v_u8mf4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg3ei8_v_u8mf4(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg4ei8_v_u8mf4(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg5ei8_v_u8mf4(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg6ei8_v_u8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg7ei8_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg8ei8_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxseg2ei8_v_u8mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxseg3ei8_v_u8mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxseg4ei8_v_u8mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxseg5ei8_v_u8mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxseg6ei8_v_u8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxseg7ei8_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxseg8ei8_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg2ei16_v_u8mf8(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg3ei16_v_u8mf8(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg4ei16_v_u8mf8(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg5ei16_v_u8mf8(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg6ei16_v_u8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg7ei16_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg8ei16_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxseg2ei16_v_u8mf4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxseg3ei16_v_u8mf4(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxseg4ei16_v_u8mf4(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxseg5ei16_v_u8mf4(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxseg6ei16_v_u8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxseg7ei16_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxseg8ei16_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxseg2ei16_v_u8mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxseg3ei16_v_u8mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxseg4ei16_v_u8mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxseg5ei16_v_u8mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxseg6ei16_v_u8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxseg7ei16_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxseg8ei16_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg2ei32_v_u8mf8(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg3ei32_v_u8mf8(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg4ei32_v_u8mf8(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg5ei32_v_u8mf8(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg6ei32_v_u8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg7ei32_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg8ei32_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg2ei32_v_u8mf4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg3ei32_v_u8mf4(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg4ei32_v_u8mf4(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg5ei32_v_u8mf4(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg6ei32_v_u8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg7ei32_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg8ei32_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxseg2ei32_v_u8mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
[[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg3ei32_v_u8mf2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg4ei32_v_u8mf2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg5ei32_v_u8mf2(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg6ei32_v_u8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg7ei32_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg8ei32_v_u8mf2(v0, v1, v2, v3, v4, v5, 
v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64_v_u8mf8(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64_v_u8mf8(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64_v_u8mf8(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, 
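+// A minimal usage sketch of the pattern exercised in these tests (assuming the
+// same base/bindex/vl operands the tests take): each vluxseg{N}ei{W} call takes
+// N output pointers and lowers to one @llvm.riscv.vluxseg{N} intrinsic whose
+// N-field aggregate result is stored field-by-field, field k to *vk. For example:
+//   vuint8mf8_t f0, f1;
+//   vluxseg2ei64_v_u8mf8(&f0, &f1, base, bindex, vl);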
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg6ei64_v_u8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg7ei64_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg8ei64_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg2ei64_v_u8mf4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg3ei64_v_u8mf4(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg4ei64_v_u8mf4(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg5ei64_v_u8mf4(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg6ei64_v_u8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg7ei64_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg8ei64_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg2ei64_v_u8mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg3ei64_v_u8mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg4ei64_v_u8mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg5ei64_v_u8mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg6ei64_v_u8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg7ei64_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg8ei64_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg2ei8_v_u16mf4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg3ei8_v_u16mf4(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg4ei8_v_u16mf4(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg5ei8_v_u16mf4(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg6ei8_v_u16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg7ei8_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg8ei8_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg2ei8_v_u16mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg3ei8_v_u16mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg4ei8_v_u16mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg5ei8_v_u16mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg6ei8_v_u16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg7ei8_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg8ei8_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg2ei16_v_u16mf4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg3ei16_v_u16mf4(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg4ei16_v_u16mf4(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg5ei16_v_u16mf4(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg6ei16_v_u16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg7ei16_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg8ei16_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg2ei16_v_u16mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg3ei16_v_u16mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg4ei16_v_u16mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg5ei16_v_u16mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg6ei16_v_u16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg7ei16_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg8ei16_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg2ei32_v_u16mf4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg3ei32_v_u16mf4(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg4ei32_v_u16mf4(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+//
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32_v_u16mf4(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32_v_u16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_u16mf4 (vuint16mf4_t *v0, 
vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32_v_u16mf2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32_v_u16mf2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf2( +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg4ei32_v_u16mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg5ei32_v_u16mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg6ei32_v_u16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg7ei32_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg8ei32_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg2ei64_v_u16mf4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg3ei64_v_u16mf4(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg4ei64_v_u16mf4(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg5ei64_v_u16mf4(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg6ei64_v_u16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg7ei64_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg8ei64_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg2ei64_v_u16mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg3ei64_v_u16mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg4ei64_v_u16mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg5ei64_v_u16mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg6ei64_v_u16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg7ei64_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg8ei64_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg2ei8_v_u32mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg3ei8_v_u32mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg4ei8_v_u32mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg5ei8_v_u32mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg6ei8_v_u32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg7ei8_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg8ei8_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg2ei16_v_u32mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg3ei16_v_u32mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg4ei16_v_u32mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg5ei16_v_u32mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg6ei16_v_u32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg7ei16_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg8ei16_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg2ei32_v_u32mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg3ei32_v_u32mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg4ei32_v_u32mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg5ei32_v_u32mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg6ei32_v_u32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg7ei32_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg8ei32_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg2ei64_v_u32mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg3ei64_v_u32mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg4ei64_v_u32mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg5ei64_v_u32mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg6ei64_v_u32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue
{ , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint8mf8_t bindex, size_t vl) { + 
return vluxseg2ei8_v_f32mf2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8_v_f32mf2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8_v_f32mf2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: 
store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16_v_f32mf2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16_v_f32mf2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16_v_f32mf2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16_v_f32mf2(v0, v1, v2, v3, v4, 
v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32_v_f32mf2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32_v_f32mf2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , 
} [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32_v_f32mf2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint64m1_t bindex, size_t vl) { + 
return vluxseg2ei64_v_f32mf2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64_v_f32mf2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64_v_f32mf2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxseg.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxseg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxseg.c @@ -7135,8912 +7135,893 @@ return vsoxseg2ei64_v_f64m4(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsoxseg2ei8_v_i8mf8_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsoxseg3ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsoxseg4ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsoxseg5ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vsoxseg6ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - 
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsoxseg7ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsoxseg8ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vsoxseg2ei8_v_i8mf4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsoxseg3ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vsoxseg4ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsoxseg5ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsoxseg6ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsoxseg7ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsoxseg8ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsoxseg2ei8_v_i8mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vsoxseg3ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vsoxseg4ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void 
test_vsoxseg5ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vsoxseg5ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsoxseg6ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vsoxseg7ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsoxseg8ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vsoxseg2ei8_v_i8m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vsoxseg3ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return 
vsoxseg4ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsoxseg5ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsoxseg6ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsoxseg7ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vsoxseg8ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return vsoxseg2ei8_v_i8m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return vsoxseg3ei8_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
- return vsoxseg4ei8_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_i8m4_m (vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) {
- return vsoxseg2ei8_v_i8m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return vsoxseg2ei16_v_i8mf8_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return vsoxseg3ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return vsoxseg4ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return vsoxseg5ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vsoxseg6ei16_v_i8mf8_m(mask, base, bindex,
v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vsoxseg7ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vsoxseg8ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vsoxseg2ei16_v_i8mf4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vsoxseg3ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vsoxseg4ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vsoxseg5ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void
@llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vsoxseg6ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vsoxseg7ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vsoxseg8ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return vsoxseg2ei16_v_i8mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vsoxseg3ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vsoxseg4ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]],
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return vsoxseg5ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return vsoxseg6ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return vsoxseg7ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return vsoxseg8ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return vsoxseg2ei16_v_i8m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return vsoxseg3ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_i8m1_m (vbool8_t mask,
int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return vsoxseg4ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return vsoxseg5ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return vsoxseg6ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
- return vsoxseg7ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return vsoxseg8ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
- return vsoxseg2ei16_v_i8m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
- return vsoxseg3ei16_v_i8m2_m(mask, base,
bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
- return vsoxseg4ei16_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_i8m4_m (vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) {
- return vsoxseg2ei16_v_i8m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return vsoxseg2ei32_v_i8mf8_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return vsoxseg3ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return vsoxseg4ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return vsoxseg5ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex,
vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vsoxseg6ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vsoxseg7ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vsoxseg8ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vsoxseg2ei32_v_i8mf4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vsoxseg3ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vsoxseg4ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vsoxseg5ei32_v_i8mf4_m(mask, base, bindex, v0,
v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vsoxseg6ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vsoxseg7ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vsoxseg8ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return vsoxseg2ei32_v_i8mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vsoxseg3ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vsoxseg4ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call
void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return vsoxseg5ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return vsoxseg6ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return vsoxseg7ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return vsoxseg8ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return vsoxseg2ei32_v_i8m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return vsoxseg3ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]],
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return vsoxseg4ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return vsoxseg5ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return vsoxseg6ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
- return vsoxseg7ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return vsoxseg8ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
- return vsoxseg2ei32_v_i8m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_i8m2_m (vbool4_t mask, int8_t
*base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
- return vsoxseg3ei32_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
- return vsoxseg4ei32_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return vsoxseg2ei64_v_i8mf8_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return vsoxseg3ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return vsoxseg4ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return vsoxseg5ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vsoxseg6ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]],
[[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vsoxseg7ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vsoxseg8ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vsoxseg2ei64_v_i8mf4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vsoxseg3ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vsoxseg4ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vsoxseg5ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void
test_vsoxseg6ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vsoxseg6ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vsoxseg7ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vsoxseg8ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return vsoxseg2ei64_v_i8mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vsoxseg3ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vsoxseg4ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3,
vint8mf2_t v4, size_t vl) {
- return vsoxseg5ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return vsoxseg6ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return vsoxseg7ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return vsoxseg8ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return vsoxseg2ei64_v_i8m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return vsoxseg3ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return vsoxseg4ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL:
@test_vsoxseg5ei64_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return vsoxseg5ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return vsoxseg6ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
- return vsoxseg7ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return vsoxseg8ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return vsoxseg2ei8_v_i16mf4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
- return vsoxseg3ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i8.i64(
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return vsoxseg4ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
- return vsoxseg5ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
- return vsoxseg6ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
- return vsoxseg7ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
- return vsoxseg8ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return vsoxseg2ei8_v_i16mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]],
[[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
- return vsoxseg3ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return vsoxseg4ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
- return vsoxseg5ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
- return vsoxseg6ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
- return vsoxseg7ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
- return vsoxseg8ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]],
[[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsoxseg2ei8_v_i16m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsoxseg3ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsoxseg4ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsoxseg5ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsoxseg6ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsoxseg7ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_i16m1_m (vbool16_t mask, 
int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsoxseg8ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsoxseg2ei8_v_i16m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsoxseg3ei8_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsoxseg4ei8_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i16m4_m (vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return vsoxseg2ei8_v_i16m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsoxseg2ei16_v_i16mf4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsoxseg3ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret 
void -// -void test_vsoxseg4ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsoxseg4ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsoxseg5ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsoxseg6ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsoxseg7ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsoxseg8ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsoxseg2ei16_v_i16mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// 
-void test_vsoxseg3ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsoxseg3ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsoxseg4ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsoxseg5ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsoxseg6ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsoxseg7ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsoxseg8ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// 
CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsoxseg2ei16_v_i16m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsoxseg3ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsoxseg4ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsoxseg5ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsoxseg6ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsoxseg7ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, 
vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsoxseg8ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsoxseg2ei16_v_i16m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsoxseg3ei16_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsoxseg4ei16_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_i16m4_m (vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return vsoxseg2ei16_v_i16m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsoxseg2ei32_v_i16mf4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsoxseg3ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void 
test_vsoxseg4ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsoxseg4ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsoxseg5ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsoxseg6ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsoxseg7ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsoxseg8ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsoxseg2ei32_v_i16mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void 
test_vsoxseg3ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsoxseg3ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsoxseg4ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsoxseg5ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsoxseg6ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsoxseg7ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsoxseg8ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret 
void -// -void test_vsoxseg2ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsoxseg2ei32_v_i16m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsoxseg3ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsoxseg4ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsoxseg5ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsoxseg6ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsoxseg7ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t 
v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsoxseg8ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsoxseg2ei32_v_i16m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsoxseg3ei32_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsoxseg4ei32_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i16m4_m (vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return vsoxseg2ei32_v_i16m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsoxseg2ei64_v_i16mf4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsoxseg3ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_i16mf4_m (vbool64_t 
mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsoxseg4ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsoxseg5ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsoxseg6ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsoxseg7ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsoxseg8ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsoxseg2ei64_v_i16mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_i16mf2_m (vbool32_t mask, int16_t 
*base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsoxseg3ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsoxseg4ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsoxseg5ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsoxseg6ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsoxseg7ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsoxseg8ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_i16m1_m (vbool16_t 
mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsoxseg2ei64_v_i16m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsoxseg3ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsoxseg4ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsoxseg5ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsoxseg6ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsoxseg7ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, 
vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsoxseg8ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsoxseg2ei64_v_i16m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsoxseg3ei64_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsoxseg4ei64_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsoxseg2ei8_v_i32mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsoxseg3ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsoxseg4ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void 
test_vsoxseg5ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsoxseg5ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsoxseg6ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsoxseg7ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsoxseg8ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsoxseg2ei8_v_i32m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsoxseg3ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, 
vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsoxseg4ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsoxseg5ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsoxseg6ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsoxseg7ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsoxseg8ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsoxseg2ei8_v_i32m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return 
vsoxseg3ei8_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsoxseg4ei8_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsoxseg2ei8_v_i32m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsoxseg2ei16_v_i32mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsoxseg3ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsoxseg4ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsoxseg5ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void 
test_vsoxseg6ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsoxseg6ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsoxseg7ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsoxseg8ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsoxseg2ei16_v_i32m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsoxseg3ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsoxseg4ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t 
v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsoxseg5ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsoxseg6ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsoxseg7ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsoxseg8ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsoxseg2ei16_v_i32m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsoxseg3ei16_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return 
vsoxseg4ei16_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsoxseg2ei16_v_i32m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsoxseg2ei32_v_i32mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsoxseg3ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsoxseg4ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsoxseg5ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsoxseg6ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsoxseg7ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsoxseg8ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsoxseg2ei32_v_i32m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsoxseg3ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsoxseg4ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsoxseg5ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei32_v_i32m1_m 
(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsoxseg6ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsoxseg7ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsoxseg8ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsoxseg2ei32_v_i32m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsoxseg3ei32_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsoxseg4ei32_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsoxseg2ei32_v_i32m4_m(mask, base, bindex, v0, v1, vl); -} - 
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsoxseg2ei64_v_i32mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsoxseg3ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsoxseg4ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsoxseg5ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsoxseg6ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsoxseg7ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i64.i64( 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsoxseg8ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsoxseg2ei64_v_i32m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsoxseg3ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsoxseg4ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsoxseg5ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsoxseg6ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// 
CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsoxseg7ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsoxseg8ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsoxseg2ei64_v_i32m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsoxseg3ei64_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsoxseg4ei64_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsoxseg2ei64_v_i32m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsoxseg2ei8_v_i64m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i64m1_m( -// CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsoxseg3ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsoxseg4ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsoxseg5ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsoxseg6ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsoxseg7ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vsoxseg8ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsoxseg2ei8_v_i64m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsoxseg3ei8_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsoxseg4ei8_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsoxseg2ei8_v_i64m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsoxseg2ei16_v_i64m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsoxseg3ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsoxseg4ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsoxseg5ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsoxseg6ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsoxseg7ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vsoxseg8ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsoxseg2ei16_v_i64m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsoxseg3ei16_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsoxseg4ei16_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsoxseg2ei16_v_i64m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsoxseg2ei32_v_i64m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsoxseg3ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsoxseg4ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsoxseg5ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsoxseg6ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} 
- -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsoxseg7ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vsoxseg8ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsoxseg2ei32_v_i64m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsoxseg3ei32_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsoxseg4ei32_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsoxseg2ei32_v_i64m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsoxseg2ei64_v_i64m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsoxseg3ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsoxseg4ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsoxseg5ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsoxseg6ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsoxseg7ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, 
vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vsoxseg8ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsoxseg2ei64_v_i64m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsoxseg3ei64_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsoxseg4ei64_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsoxseg2ei64_v_i64m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsoxseg2ei8_v_u8mf8_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsoxseg3ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void 
test_vsoxseg4ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return vsoxseg4ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
- return vsoxseg5ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return vsoxseg6ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
- return vsoxseg7ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return vsoxseg8ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return vsoxseg2ei8_v_u8mf4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return vsoxseg3ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return vsoxseg4ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
- return vsoxseg5ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return vsoxseg6ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
- return vsoxseg7ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return vsoxseg8ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return vsoxseg2ei8_v_u8mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return vsoxseg3ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
- return vsoxseg4ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
- return vsoxseg5ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return vsoxseg6ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
- return vsoxseg7ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
- return vsoxseg8ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return vsoxseg2ei8_v_u8m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
- return vsoxseg3ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
- return vsoxseg4ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
- return vsoxseg5ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
- return vsoxseg6ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
- return vsoxseg7ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
- return vsoxseg8ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
- return vsoxseg2ei8_v_u8m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
- return vsoxseg3ei8_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
- return vsoxseg4ei8_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
- return vsoxseg2ei8_v_u8m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return vsoxseg2ei16_v_u8mf8_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
- return vsoxseg3ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return vsoxseg4ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
- return vsoxseg5ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return vsoxseg6ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
- return vsoxseg7ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return vsoxseg8ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return vsoxseg2ei16_v_u8mf4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return vsoxseg3ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return vsoxseg4ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
- return vsoxseg5ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return vsoxseg6ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
- return vsoxseg7ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return vsoxseg8ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return vsoxseg2ei16_v_u8mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return vsoxseg3ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
- return vsoxseg4ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
- return vsoxseg5ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return vsoxseg6ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
- return vsoxseg7ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
- return vsoxseg8ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return vsoxseg2ei16_v_u8m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
- return vsoxseg3ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
- return vsoxseg4ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
- return vsoxseg5ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
- return vsoxseg6ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
- return vsoxseg7ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
- return vsoxseg8ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
- return vsoxseg2ei16_v_u8m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
- return vsoxseg3ei16_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
- return vsoxseg4ei16_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
- return vsoxseg2ei16_v_u8m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return vsoxseg2ei32_v_u8mf8_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
- return vsoxseg3ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return vsoxseg4ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
- return vsoxseg5ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return vsoxseg6ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
- return vsoxseg7ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return vsoxseg8ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return vsoxseg2ei32_v_u8mf4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return vsoxseg3ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return vsoxseg4ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
- return vsoxseg5ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return vsoxseg6ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
- return vsoxseg7ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return vsoxseg8ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return vsoxseg2ei32_v_u8mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return vsoxseg3ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
- return vsoxseg4ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
- return vsoxseg5ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return vsoxseg6ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
- return vsoxseg7ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
- return vsoxseg8ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return vsoxseg2ei32_v_u8m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
- return vsoxseg3ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
- return vsoxseg4ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
- return vsoxseg5ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
- return vsoxseg6ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
- return vsoxseg7ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
- return vsoxseg8ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
- return vsoxseg2ei32_v_u8m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
- return vsoxseg3ei32_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
- return vsoxseg4ei32_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return vsoxseg2ei64_v_u8mf8_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
- return vsoxseg3ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return vsoxseg4ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
- return vsoxseg5ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return vsoxseg6ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
- return vsoxseg7ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return vsoxseg8ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return vsoxseg2ei64_v_u8mf4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return vsoxseg3ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return vsoxseg4ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
- return vsoxseg5ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return vsoxseg6ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
- return vsoxseg7ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return vsoxseg8ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return vsoxseg2ei64_v_u8mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return vsoxseg3ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
- return vsoxseg4ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
- return vsoxseg5ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return vsoxseg6ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
- return vsoxseg7ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
- return vsoxseg8ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return vsoxseg2ei64_v_u8m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
- return vsoxseg3ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
- return vsoxseg4ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
- return vsoxseg5ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
- return vsoxseg6ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
- return vsoxseg7ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
- return vsoxseg8ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsoxseg2ei8_v_u16mf4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsoxseg3ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsoxseg4ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsoxseg5ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsoxseg6ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsoxseg7ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsoxseg8ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsoxseg2ei8_v_u16mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsoxseg3ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsoxseg4ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return
vsoxseg5ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vsoxseg6ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vsoxseg7ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsoxseg8ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsoxseg2ei8_v_u16m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsoxseg3ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsoxseg4ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - 
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsoxseg5ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsoxseg6ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsoxseg7ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsoxseg8ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsoxseg2ei8_v_u16m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsoxseg3ei8_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsoxseg4ei8_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return vsoxseg2ei8_v_u16m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsoxseg2ei16_v_u16mf4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsoxseg3ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsoxseg4ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsoxseg5ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsoxseg6ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsoxseg7ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsoxseg8ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsoxseg2ei16_v_u16mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsoxseg3ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsoxseg4ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsoxseg5ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsoxseg6ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsoxseg7ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsoxseg8ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsoxseg2ei16_v_u16m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsoxseg3ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsoxseg4ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsoxseg5ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsoxseg6ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsoxseg7ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsoxseg8ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsoxseg2ei16_v_u16m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsoxseg3ei16_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsoxseg4ei16_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return vsoxseg2ei16_v_u16m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsoxseg2ei32_v_u16mf4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsoxseg3ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsoxseg4ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsoxseg5ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsoxseg6ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsoxseg7ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsoxseg8ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsoxseg2ei32_v_u16mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsoxseg3ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsoxseg4ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsoxseg5ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsoxseg6ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsoxseg7ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsoxseg8ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsoxseg2ei32_v_u16m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsoxseg3ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsoxseg4ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsoxseg5ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsoxseg6ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsoxseg7ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsoxseg8ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsoxseg2ei32_v_u16m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsoxseg3ei32_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsoxseg4ei32_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return vsoxseg2ei32_v_u16m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsoxseg2ei64_v_u16mf4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsoxseg3ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsoxseg4ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsoxseg5ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsoxseg6ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsoxseg7ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsoxseg8ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsoxseg2ei64_v_u16mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsoxseg3ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsoxseg4ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsoxseg5ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsoxseg6ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsoxseg7ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsoxseg8ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsoxseg2ei64_v_u16m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsoxseg3ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsoxseg4ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsoxseg5ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsoxseg6ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsoxseg7ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsoxseg8ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsoxseg2ei64_v_u16m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsoxseg3ei64_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsoxseg4ei64_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsoxseg2ei8_v_u32mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsoxseg3ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsoxseg4ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsoxseg5ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsoxseg6ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsoxseg7ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsoxseg8ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsoxseg2ei8_v_u32m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsoxseg3ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vsoxseg4ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsoxseg5ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsoxseg6ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsoxseg7ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsoxseg8ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsoxseg2ei8_v_u32m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vsoxseg3ei8_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vsoxseg4ei8_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]],
[[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsoxseg2ei8_v_u32m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsoxseg2ei16_v_u32mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsoxseg3ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsoxseg4ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsoxseg5ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsoxseg6ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, 
vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsoxseg7ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsoxseg8ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsoxseg2ei16_v_u32m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsoxseg3ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsoxseg4ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsoxseg5ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsoxseg6ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, 
vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsoxseg7ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsoxseg8ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsoxseg2ei16_v_u32m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsoxseg3ei16_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsoxseg4ei16_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsoxseg2ei16_v_u32m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsoxseg2ei32_v_u32mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsoxseg3ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsoxseg4ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsoxseg5ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsoxseg6ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsoxseg7ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsoxseg8ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsoxseg2ei32_v_u32m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsoxseg3ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsoxseg4ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsoxseg5ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsoxseg6ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t 
v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsoxseg7ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsoxseg8ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsoxseg2ei32_v_u32m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsoxseg3ei32_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsoxseg4ei32_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsoxseg2ei32_v_u32m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsoxseg2ei64_v_u32mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i64.i64( 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsoxseg3ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsoxseg4ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsoxseg5ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsoxseg6ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsoxseg7ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsoxseg8ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsoxseg2ei64_v_u32m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsoxseg3ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsoxseg4ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsoxseg5ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsoxseg6ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsoxseg7ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsoxseg8ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsoxseg2ei64_v_u32m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsoxseg3ei64_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsoxseg4ei64_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsoxseg2ei64_v_u32m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsoxseg2ei8_v_u64m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsoxseg3ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsoxseg4ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsoxseg5ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsoxseg6ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsoxseg7ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsoxseg8ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsoxseg2ei8_v_u64m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsoxseg3ei8_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsoxseg4ei8_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsoxseg2ei8_v_u64m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsoxseg2ei16_v_u64m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsoxseg3ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsoxseg4ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsoxseg5ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// 
CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsoxseg6ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsoxseg7ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsoxseg8ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsoxseg2ei16_v_u64m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsoxseg3ei16_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsoxseg4ei16_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m4_m( -// CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsoxseg2ei16_v_u64m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsoxseg2ei32_v_u64m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsoxseg3ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsoxseg4ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsoxseg5ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsoxseg6ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei32_v_u64m1_m (vbool64_t mask, uint64_t 
*base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsoxseg7ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsoxseg8ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsoxseg2ei32_v_u64m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsoxseg3ei32_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsoxseg4ei32_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsoxseg2ei32_v_u64m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsoxseg2ei64_v_u64m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsoxseg3ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsoxseg4ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsoxseg5ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsoxseg6ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsoxseg7ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsoxseg8ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsoxseg2ei64_v_u64m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsoxseg3ei64_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsoxseg4ei64_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsoxseg2ei64_v_u64m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsoxseg2ei8_v_f32mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsoxseg3ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsoxseg4ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f32mf2_m( 
-// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsoxseg5ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsoxseg6ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsoxseg7ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsoxseg8ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsoxseg2ei8_v_f32m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vsoxseg3ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32m1_m( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsoxseg4ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsoxseg5ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsoxseg6ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsoxseg7ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsoxseg8ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vsoxseg2ei8_v_f32m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32m2_m( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return vsoxseg3ei8_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsoxseg4ei8_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_f32m4_m (vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsoxseg2ei8_v_f32m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsoxseg2ei16_v_f32mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsoxseg3ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsoxseg4ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, 
size_t vl) { - return vsoxseg5ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsoxseg6ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsoxseg7ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsoxseg8ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsoxseg2ei16_v_f32m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vsoxseg3ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - 
return vsoxseg4ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsoxseg5ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsoxseg6ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsoxseg7ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsoxseg8ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vsoxseg2ei16_v_f32m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return 
vsoxseg3ei16_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsoxseg4ei16_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_f32m4_m (vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsoxseg2ei16_v_f32m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsoxseg2ei32_v_f32mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsoxseg3ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsoxseg4ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsoxseg5ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsoxseg6ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsoxseg7ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsoxseg8ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsoxseg2ei32_v_f32m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vsoxseg3ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsoxseg4ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret 
void -// -void test_vsoxseg5ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsoxseg5ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsoxseg6ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsoxseg7ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsoxseg8ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vsoxseg2ei32_v_f32m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return vsoxseg3ei32_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void 
test_vsoxseg4ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsoxseg4ei32_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_f32m4_m (vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsoxseg2ei32_v_f32m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsoxseg2ei64_v_f32mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsoxseg3ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsoxseg4ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsoxseg5ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsoxseg6ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: 
@test_vsoxseg7ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsoxseg7ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsoxseg8ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsoxseg2ei64_v_f32m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vsoxseg3ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsoxseg4ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsoxseg5ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsoxseg6ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsoxseg7ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsoxseg8ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vsoxseg2ei64_v_f32m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return vsoxseg3ei64_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsoxseg4ei64_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_f32m4_m (vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsoxseg2ei64_v_f32m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsoxseg2ei8_v_f64m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsoxseg3ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsoxseg4ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsoxseg5ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsoxseg6ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, 
vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsoxseg7ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsoxseg8ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsoxseg2ei8_v_f64m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsoxseg3ei8_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsoxseg4ei8_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_f64m4_m (vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsoxseg2ei8_v_f64m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsoxseg2ei16_v_f64m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsoxseg3ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsoxseg4ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsoxseg5ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsoxseg6ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsoxseg7ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsoxseg8ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: 
@test_vsoxseg2ei16_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vsoxseg2ei16_v_f64m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vsoxseg3ei16_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vsoxseg4ei16_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_f64m4_m (vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vsoxseg2ei16_v_f64m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return vsoxseg2ei32_v_f64m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return vsoxseg3ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return vsoxseg4ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return vsoxseg5ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return vsoxseg6ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return vsoxseg7ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return vsoxseg8ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vsoxseg2ei32_v_f64m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vsoxseg3ei32_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vsoxseg4ei32_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f64m4_m (vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vsoxseg2ei32_v_f64m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return vsoxseg2ei64_v_f64m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return vsoxseg3ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return vsoxseg4ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return vsoxseg5ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return vsoxseg6ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return vsoxseg7ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return vsoxseg8ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vsoxseg2ei64_v_f64m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vsoxseg3ei64_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vsoxseg4ei64_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_f64m4_m (vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vsoxseg2ei64_v_f64m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return vsoxseg2ei8_v_f16mf4(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return vsoxseg3ei8_v_f16mf4(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return vsoxseg4ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return vsoxseg5ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return vsoxseg6ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return vsoxseg7ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return vsoxseg8ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return vsoxseg2ei8_v_f16mf2(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return vsoxseg3ei8_v_f16mf2(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return vsoxseg4ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return vsoxseg5ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return vsoxseg6ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return vsoxseg7ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
- return vsoxseg8ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return vsoxseg2ei8_v_f16m1(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
- return vsoxseg3ei8_v_f16m1(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return vsoxseg4ei8_v_f16m1(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
- return vsoxseg5ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return vsoxseg6ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
- return vsoxseg7ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
- return vsoxseg8ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return vsoxseg2ei8_v_f16m2(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
- return vsoxseg3ei8_v_f16m2(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return vsoxseg4ei8_v_f16m2(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_f16m4 (_Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
- return vsoxseg2ei8_v_f16m4(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return vsoxseg2ei16_v_f16mf4(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return vsoxseg3ei16_v_f16mf4(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return vsoxseg4ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return vsoxseg5ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return vsoxseg6ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return vsoxseg7ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return vsoxseg8ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return vsoxseg2ei16_v_f16mf2(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return vsoxseg3ei16_v_f16mf2(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return vsoxseg4ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return vsoxseg5ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return vsoxseg6ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return vsoxseg7ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
- return vsoxseg8ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return vsoxseg2ei16_v_f16m1(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
- return vsoxseg3ei16_v_f16m1(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return vsoxseg4ei16_v_f16m1(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
- return vsoxseg5ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return vsoxseg6ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
- return vsoxseg7ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
- return vsoxseg8ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return vsoxseg2ei16_v_f16m2(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
- return vsoxseg3ei16_v_f16m2(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return vsoxseg4ei16_v_f16m2(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_f16m4 (_Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
- return vsoxseg2ei16_v_f16m4(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return vsoxseg2ei32_v_f16mf4(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return vsoxseg3ei32_v_f16mf4(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return vsoxseg4ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return vsoxseg5ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return vsoxseg6ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return vsoxseg7ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return vsoxseg8ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return vsoxseg2ei32_v_f16mf2(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return vsoxseg3ei32_v_f16mf2(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return vsoxseg4ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return vsoxseg5ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return vsoxseg6ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return vsoxseg7ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
- return vsoxseg8ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return vsoxseg2ei32_v_f16m1(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
- return vsoxseg3ei32_v_f16m1(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return vsoxseg4ei32_v_f16m1(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
- return vsoxseg5ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return vsoxseg6ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
- return vsoxseg7ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
- return vsoxseg8ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return vsoxseg2ei32_v_f16m2(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
- return vsoxseg3ei32_v_f16m2(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return vsoxseg4ei32_v_f16m2(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f16m4 (_Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
- return vsoxseg2ei32_v_f16m4(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return vsoxseg2ei64_v_f16mf4(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return vsoxseg3ei64_v_f16mf4(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return vsoxseg4ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return vsoxseg5ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return vsoxseg6ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return vsoxseg7ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return vsoxseg8ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return vsoxseg2ei64_v_f16mf2(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return vsoxseg3ei64_v_f16mf2(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return vsoxseg4ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return vsoxseg5ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return vsoxseg6ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return vsoxseg7ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
- return vsoxseg8ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return vsoxseg2ei64_v_f16m1(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
- return vsoxseg3ei64_v_f16m1(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return vsoxseg4ei64_v_f16m1(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
- return vsoxseg5ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-// -void test_vsoxseg6ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return vsoxseg6ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return vsoxseg7ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return vsoxseg8ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return vsoxseg2ei64_v_f16m2(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return vsoxseg3ei64_v_f16m2(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return vsoxseg4ei64_v_f16m2(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return 
+void test_vsoxseg2ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
+  return vsoxseg2ei8_v_f16mf4(base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
-  return vsoxseg3ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
+  return vsoxseg3ei8_v_f16mf4(base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
-  return vsoxseg4ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
+  return vsoxseg4ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg5ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
-  return vsoxseg5ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
+  return vsoxseg5ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg6ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
-  return vsoxseg6ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
+  return vsoxseg6ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg7ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
-  return vsoxseg7ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
+  return vsoxseg7ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg8ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
-  return vsoxseg8ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+  return vsoxseg8ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
-  return vsoxseg2ei8_v_f16mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
+  return vsoxseg2ei8_v_f16mf2(base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
-  return vsoxseg3ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
+  return vsoxseg3ei8_v_f16mf2(base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
-  return vsoxseg4ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
+  return vsoxseg4ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg5ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
-  return vsoxseg5ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
+  return vsoxseg5ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg6ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
-  return vsoxseg6ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
+  return vsoxseg6ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg7ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
-  return vsoxseg7ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
+  return vsoxseg7ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg8ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
-  return vsoxseg8ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
+  return vsoxseg8ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
-  return vsoxseg2ei8_v_f16m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
+  return vsoxseg2ei8_v_f16m1(base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
-  return vsoxseg3ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
+  return vsoxseg3ei8_v_f16m1(base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
-  return vsoxseg4ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
+  return vsoxseg4ei8_v_f16m1(base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg5ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
-  return vsoxseg5ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
+  return vsoxseg5ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg6ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
-  return vsoxseg6ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
+  return vsoxseg6ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg7ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
-  return vsoxseg7ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
+  return vsoxseg7ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg8ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
-  return vsoxseg8ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+  return vsoxseg8ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei8_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
-  return vsoxseg2ei8_v_f16m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+  return vsoxseg2ei8_v_f16m2(base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei8_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
-  return vsoxseg3ei8_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
+  return vsoxseg3ei8_v_f16m2(base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei8_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
-  return vsoxseg4ei8_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
+  return vsoxseg4ei8_v_f16m2(base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei8_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
-  return vsoxseg2ei8_v_f16m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f16m4 (_Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
+  return vsoxseg2ei8_v_f16m4(base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
-  return vsoxseg2ei16_v_f16mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
+  return vsoxseg2ei16_v_f16mf4(base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
-  return vsoxseg3ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
+  return vsoxseg3ei16_v_f16mf4(base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
-  return vsoxseg4ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
+  return vsoxseg4ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg5ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
-  return vsoxseg5ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
+  return vsoxseg5ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg6ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
-  return vsoxseg6ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
+  return vsoxseg6ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg7ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
-  return vsoxseg7ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
+  return vsoxseg7ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg8ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
-  return vsoxseg8ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+  return vsoxseg8ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
-  return vsoxseg2ei16_v_f16mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
+  return vsoxseg2ei16_v_f16mf2(base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
-  return vsoxseg3ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
+  return vsoxseg3ei16_v_f16mf2(base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
-  return vsoxseg4ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
+  return vsoxseg4ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg5ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
-  return vsoxseg5ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
+  return vsoxseg5ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg6ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
-  return vsoxseg6ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
+  return vsoxseg6ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg7ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
-  return vsoxseg7ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
+  return vsoxseg7ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg8ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
-  return vsoxseg8ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
+  return vsoxseg8ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
-  return vsoxseg2ei16_v_f16m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
+  return vsoxseg2ei16_v_f16m1(base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
-  return vsoxseg3ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
+  return vsoxseg3ei16_v_f16m1(base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
-  return vsoxseg4ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
+  return vsoxseg4ei16_v_f16m1(base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg5ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
-  return vsoxseg5ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
+  return vsoxseg5ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg6ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
-  return vsoxseg6ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
+  return vsoxseg6ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg7ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
-  return vsoxseg7ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
+  return vsoxseg7ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg8ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
-  return vsoxseg8ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+  return vsoxseg8ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei16_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
-  return vsoxseg2ei16_v_f16m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+  return vsoxseg2ei16_v_f16m2(base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei16_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
-  return vsoxseg3ei16_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
+  return vsoxseg3ei16_v_f16m2(base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei16_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
-  return vsoxseg4ei16_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
+  return vsoxseg4ei16_v_f16m2(base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei16_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
-  return vsoxseg2ei16_v_f16m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_f16m4 (_Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
+  return vsoxseg2ei16_v_f16m4(base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
-  return vsoxseg2ei32_v_f16mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
+  return vsoxseg2ei32_v_f16mf4(base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
-  return vsoxseg3ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
+  return vsoxseg3ei32_v_f16mf4(base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
-  return vsoxseg4ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
+  return vsoxseg4ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg5ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
-  return vsoxseg5ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
+  return vsoxseg5ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg6ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
-  return vsoxseg6ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
+  return vsoxseg6ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg7ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
-  return vsoxseg7ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
+  return vsoxseg7ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg8ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
-  return vsoxseg8ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+  return vsoxseg8ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
-  return vsoxseg2ei32_v_f16mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
+  return vsoxseg2ei32_v_f16mf2(base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
-  return vsoxseg3ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
+  return vsoxseg3ei32_v_f16mf2(base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return vsoxseg4ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { + return vsoxseg4ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return vsoxseg5ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { + return vsoxseg5ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return vsoxseg6ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { + return vsoxseg6ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return vsoxseg7ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { + return vsoxseg7ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return vsoxseg8ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { + return vsoxseg8ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return vsoxseg2ei32_v_f16m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { + return vsoxseg2ei32_v_f16m1(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return 
vsoxseg3ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { + return vsoxseg3ei32_v_f16m1(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return vsoxseg4ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { + return vsoxseg4ei32_v_f16m1(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return vsoxseg5ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { + return vsoxseg5ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return vsoxseg6ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { + return vsoxseg6ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return vsoxseg7ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { + return vsoxseg7ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return vsoxseg8ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { + return vsoxseg8ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return vsoxseg2ei32_v_f16m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { + return vsoxseg2ei32_v_f16m2(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return vsoxseg3ei32_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { + return vsoxseg3ei32_v_f16m2(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return vsoxseg4ei32_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { + return vsoxseg4ei32_v_f16m2(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return vsoxseg2ei32_v_f16m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_f16m4 (_Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { + return vsoxseg2ei32_v_f16m4(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return vsoxseg2ei64_v_f16mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { + return vsoxseg2ei64_v_f16mf4(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return vsoxseg3ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { + return vsoxseg3ei64_v_f16mf4(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return vsoxseg4ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { + return vsoxseg4ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return vsoxseg5ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { + return vsoxseg5ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return vsoxseg6ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_f16mf4 
(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { + return vsoxseg6ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return vsoxseg7ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { + return vsoxseg7ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return vsoxseg8ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { + return vsoxseg8ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return vsoxseg2ei64_v_f16mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { + return vsoxseg2ei64_v_f16mf2(base, bindex, 
v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return vsoxseg3ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { + return vsoxseg3ei64_v_f16mf2(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return vsoxseg4ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { + return vsoxseg4ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return vsoxseg5ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { + return vsoxseg5ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) 
// CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return vsoxseg6ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { + return vsoxseg6ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return vsoxseg7ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { + return vsoxseg7ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return vsoxseg8ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { + return vsoxseg8ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxseg2ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return vsoxseg2ei64_v_f16m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { + return vsoxseg2ei64_v_f16m1(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return vsoxseg3ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { + return vsoxseg3ei64_v_f16m1(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return vsoxseg4ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { + return vsoxseg4ei64_v_f16m1(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return vsoxseg5ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { + return vsoxseg5ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return vsoxseg6ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { + return vsoxseg6ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return vsoxseg7ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { + return vsoxseg7ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return vsoxseg8ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { + return vsoxseg8ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], half* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return vsoxseg2ei64_v_f16m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { + return vsoxseg2ei64_v_f16m2(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return vsoxseg3ei64_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { + return vsoxseg3ei64_v_f16m2(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return vsoxseg4ei64_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { + return vsoxseg4ei64_v_f16m2(base, bindex, v0, v1, v2, v3, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxseg_mask.c copy from clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxseg.c copy to clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxseg_mask.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxseg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxseg_mask.c @@ -7,15151 +7,7132 @@ #include -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf8( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsoxseg2ei8_v_i8mf8(base, bindex, v0, v1, vl); +void 
test_vsoxseg2ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { + return vsoxseg2ei8_v_i8mf8_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf8( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsoxseg3ei8_v_i8mf8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { + return vsoxseg3ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf8( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsoxseg4ei8_v_i8mf8(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { + return vsoxseg4ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8mf8( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsoxseg5ei8_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { + return vsoxseg5ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf8( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vsoxseg6ei8_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { + return vsoxseg6ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf8( +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsoxseg7ei8_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { + return vsoxseg7ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf8( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsoxseg8ei8_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { + return vsoxseg8ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf4( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vsoxseg2ei8_v_i8mf4(base, bindex, v0, v1, vl); +void 
test_vsoxseg2ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { + return vsoxseg2ei8_v_i8mf4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf4( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsoxseg3ei8_v_i8mf4(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { + return vsoxseg3ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf4( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vsoxseg4ei8_v_i8mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { + return vsoxseg4ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8mf4( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsoxseg5ei8_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { + return vsoxseg5ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf4( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vsoxseg6ei8_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
+ return vsoxseg6ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vsoxseg7ei8_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
+ return vsoxseg7ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vsoxseg8ei8_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
+ return vsoxseg8ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return vsoxseg2ei8_v_i8mf2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
+ return vsoxseg2ei8_v_i8mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vsoxseg3ei8_v_i8mf2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
+ return vsoxseg3ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vsoxseg4ei8_v_i8mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
+ return vsoxseg4ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return vsoxseg5ei8_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
+ return vsoxseg5ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return vsoxseg6ei8_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
+ return vsoxseg6ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return vsoxseg7ei8_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
+ return vsoxseg7ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return vsoxseg8ei8_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
+ return vsoxseg8ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m1(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return vsoxseg2ei8_v_i8m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
+ return vsoxseg2ei8_v_i8m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8m1(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return vsoxseg3ei8_v_i8m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
+ return vsoxseg3ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8m1(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return vsoxseg4ei8_v_i8m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
+ return vsoxseg4ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8m1(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return vsoxseg5ei8_v_i8m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
+ return vsoxseg5ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8m1(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return vsoxseg6ei8_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
+ return vsoxseg6ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8m1(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
- return vsoxseg7ei8_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
+ return vsoxseg7ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8m1(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return vsoxseg8ei8_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
+ return vsoxseg8ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i8m2 (int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
- return vsoxseg2ei8_v_i8m2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
+ return vsoxseg2ei8_v_i8m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8m2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_i8m2 (int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
- return vsoxseg3ei8_v_i8m2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
+ return vsoxseg3ei8_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8m2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_i8m2 (int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
- return vsoxseg4ei8_v_i8m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
+ return vsoxseg4ei8_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m4(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i8m4 (int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) {
- return vsoxseg2ei8_v_i8m4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i8m4_m (vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) {
+ return vsoxseg2ei8_v_i8m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return vsoxseg2ei16_v_i8mf8(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
+ return vsoxseg2ei16_v_i8mf8_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return vsoxseg3ei16_v_i8mf8(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
+ return vsoxseg3ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return vsoxseg4ei16_v_i8mf8(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
+ return vsoxseg4ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return vsoxseg5ei16_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
+ return vsoxseg5ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vsoxseg6ei16_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
+ return vsoxseg6ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vsoxseg7ei16_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
+ return vsoxseg7ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vsoxseg8ei16_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
+ return vsoxseg8ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vsoxseg2ei16_v_i8mf4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
+ return vsoxseg2ei16_v_i8mf4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vsoxseg3ei16_v_i8mf4(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
+ return vsoxseg3ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vsoxseg4ei16_v_i8mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
+ return vsoxseg4ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vsoxseg5ei16_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
+ return vsoxseg5ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vsoxseg6ei16_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
+ return vsoxseg6ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vsoxseg7ei16_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
+ return vsoxseg7ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vsoxseg8ei16_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
+ return vsoxseg8ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return vsoxseg2ei16_v_i8mf2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
+ return vsoxseg2ei16_v_i8mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vsoxseg3ei16_v_i8mf2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
+ return vsoxseg3ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vsoxseg4ei16_v_i8mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
+ return vsoxseg4ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return vsoxseg5ei16_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
+ return vsoxseg5ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return vsoxseg6ei16_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
+ return vsoxseg6ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return vsoxseg7ei16_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
+ return vsoxseg7ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return vsoxseg8ei16_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
+ return vsoxseg8ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m1(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return vsoxseg2ei16_v_i8m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
+ return vsoxseg2ei16_v_i8m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8m1(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return vsoxseg3ei16_v_i8m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
+ return vsoxseg3ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8m1(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return vsoxseg4ei16_v_i8m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
+ return vsoxseg4ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8m1(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return vsoxseg5ei16_v_i8m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
+ return vsoxseg5ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8m1(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return vsoxseg6ei16_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
+ return vsoxseg6ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8m1(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
- return vsoxseg7ei16_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
+ return vsoxseg7ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8m1(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return vsoxseg8ei16_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
+ return vsoxseg8ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i8m2 (int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
- return vsoxseg2ei16_v_i8m2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
+ return vsoxseg2ei16_v_i8m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8m2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_i8m2 (int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
- return vsoxseg3ei16_v_i8m2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
+ return vsoxseg3ei16_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8m2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei16_v_i8m2 (int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
- return vsoxseg4ei16_v_i8m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
+ return vsoxseg4ei16_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m4(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i8m4 (int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) {
- return vsoxseg2ei16_v_i8m4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i8m4_m (vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) {
+ return vsoxseg2ei16_v_i8m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return vsoxseg2ei32_v_i8mf8(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
+ return vsoxseg2ei32_v_i8mf8_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return vsoxseg3ei32_v_i8mf8(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
+ return vsoxseg3ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return vsoxseg4ei32_v_i8mf8(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
+ return vsoxseg4ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return vsoxseg5ei32_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
+ return vsoxseg5ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vsoxseg6ei32_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
+ return vsoxseg6ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vsoxseg7ei32_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
+ return vsoxseg7ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vsoxseg8ei32_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
+ return vsoxseg8ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vsoxseg2ei32_v_i8mf4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
+ return vsoxseg2ei32_v_i8mf4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vsoxseg3ei32_v_i8mf4(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
+ return vsoxseg3ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vsoxseg4ei32_v_i8mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
+ return vsoxseg4ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vsoxseg5ei32_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
+ return vsoxseg5ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vsoxseg6ei32_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
+ return vsoxseg6ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vsoxseg7ei32_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
+ return vsoxseg7ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vsoxseg8ei32_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
+ return vsoxseg8ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return vsoxseg2ei32_v_i8mf2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
+ return vsoxseg2ei32_v_i8mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vsoxseg3ei32_v_i8mf2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
+ return vsoxseg3ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vsoxseg4ei32_v_i8mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
+ return vsoxseg4ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return vsoxseg5ei32_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
+ return vsoxseg5ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return vsoxseg6ei32_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
+ return vsoxseg6ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return vsoxseg7ei32_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
+ return vsoxseg7ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return vsoxseg8ei32_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
+ return vsoxseg8ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8m1(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return vsoxseg2ei32_v_i8m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0,
vint8m1_t v1, size_t vl) { + return vsoxseg2ei32_v_i8m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8m1( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vsoxseg3ei32_v_i8m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { + return vsoxseg3ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8m1( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vsoxseg4ei32_v_i8m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { + return vsoxseg4ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8m1( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsoxseg5ei32_v_i8m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { + return vsoxseg5ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8m1( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, 
vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsoxseg6ei32_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { + return vsoxseg6ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8m1( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsoxseg7ei32_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { + return vsoxseg7ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8m1( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vsoxseg8ei32_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { + return vsoxseg8ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8m2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i8m2 (int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return vsoxseg2ei32_v_i8m2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { + return vsoxseg2ei32_v_i8m2_m(mask, base, bindex, v0, 
v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8m2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i8m2 (int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return vsoxseg3ei32_v_i8m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { + return vsoxseg3ei32_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8m2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i8m2 (int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return vsoxseg4ei32_v_i8m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { + return vsoxseg4ei32_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf8( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsoxseg2ei64_v_i8mf8(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { + return vsoxseg2ei64_v_i8mf8_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf8( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsoxseg3ei64_v_i8mf8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { + return vsoxseg3ei64_v_i8mf8_m(mask, base, 
bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf8( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsoxseg4ei64_v_i8mf8(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { + return vsoxseg4ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf8( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsoxseg5ei64_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { + return vsoxseg5ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf8( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vsoxseg6ei64_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { + return vsoxseg6ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf8( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], 
[[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsoxseg7ei64_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { + return vsoxseg7ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf8( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsoxseg8ei64_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { + return vsoxseg8ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf4( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vsoxseg2ei64_v_i8mf4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { + return vsoxseg2ei64_v_i8mf4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf4( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsoxseg3ei64_v_i8mf4(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { + 
return vsoxseg3ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf4( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vsoxseg4ei64_v_i8mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { + return vsoxseg4ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf4( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsoxseg5ei64_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { + return vsoxseg5ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf4( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsoxseg6ei64_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { + return vsoxseg6ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf4( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsoxseg7ei64_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { + return vsoxseg7ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf4( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsoxseg8ei64_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { + return vsoxseg8ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsoxseg2ei64_v_i8mf2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { + return vsoxseg2ei64_v_i8mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vsoxseg3ei64_v_i8mf2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t 
v1, vint8mf2_t v2, size_t vl) { + return vsoxseg3ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vsoxseg4ei64_v_i8mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { + return vsoxseg4ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf2( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vsoxseg5ei64_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { + return vsoxseg5ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf2( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsoxseg6ei64_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { + return vsoxseg6ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf2( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vsoxseg7ei64_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { + return vsoxseg7ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf2( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsoxseg8ei64_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { + return vsoxseg8ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8m1( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vsoxseg2ei64_v_i8m1(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { + return vsoxseg2ei64_v_i8m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8m1( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vsoxseg3ei64_v_i8m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i8m1_m (vbool8_t mask, int8_t *base, 
vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { + return vsoxseg3ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8m1( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vsoxseg4ei64_v_i8m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { + return vsoxseg4ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8m1( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsoxseg5ei64_v_i8m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { + return vsoxseg5ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8m1( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsoxseg6ei64_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { + return vsoxseg6ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8m1( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsoxseg7ei64_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { + return vsoxseg7ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8m1( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vsoxseg8ei64_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { + return vsoxseg8ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16mf4( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsoxseg2ei8_v_i16mf4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { + return vsoxseg2ei8_v_i16mf4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16mf4( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsoxseg3ei8_v_i16mf4(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t 
bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { + return vsoxseg3ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16mf4( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsoxseg4ei8_v_i16mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { + return vsoxseg4ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16mf4( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsoxseg5ei8_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { + return vsoxseg5ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16mf4( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsoxseg6ei8_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { + return vsoxseg6ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16mf4( +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsoxseg7ei8_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { + return vsoxseg7ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16mf4( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsoxseg8ei8_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { + return vsoxseg8ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsoxseg2ei8_v_i16mf2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { + return vsoxseg2ei8_v_i16mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - 
return vsoxseg3ei8_v_i16mf2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
+  return vsoxseg3ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
-  return vsoxseg4ei8_v_i16mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
+  return vsoxseg4ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg5ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
-  return vsoxseg5ei8_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
+  return vsoxseg5ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg6ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
-  return vsoxseg6ei8_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
+  return vsoxseg6ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg7ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
-  return vsoxseg7ei8_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
+  return vsoxseg7ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg8ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
-  return vsoxseg8ei8_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
+  return vsoxseg8ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
-  return vsoxseg2ei8_v_i16m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
+  return vsoxseg2ei8_v_i16m1_m(mask, base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
-  return vsoxseg3ei8_v_i16m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
+  return vsoxseg3ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
-  return vsoxseg4ei8_v_i16m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
+  return vsoxseg4ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg5ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
-  return vsoxseg5ei8_v_i16m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
+  return vsoxseg5ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg6ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
-  return vsoxseg6ei8_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
+  return vsoxseg6ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg7ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
-  return vsoxseg7ei8_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
+  return vsoxseg7ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg8ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
-  return vsoxseg8ei8_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
+  return vsoxseg8ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei8_v_i16m2 (int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
-  return vsoxseg2ei8_v_i16m2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
+  return vsoxseg2ei8_v_i16m2_m(mask, base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16m2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei8_v_i16m2 (int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
-  return vsoxseg3ei8_v_i16m2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
+  return vsoxseg3ei8_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16m2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei8_v_i16m2 (int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
-  return vsoxseg4ei8_v_i16m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
+  return vsoxseg4ei8_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m4(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei8_v_i16m4 (int16_t *base, vuint8m2_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
-  return vsoxseg2ei8_v_i16m4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i16m4_m (vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
+  return vsoxseg2ei8_v_i16m4_m(mask, base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
-  return vsoxseg2ei16_v_i16mf4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
+  return vsoxseg2ei16_v_i16mf4_m(mask, base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
-  return vsoxseg3ei16_v_i16mf4(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
+  return vsoxseg3ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
-  return vsoxseg4ei16_v_i16mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
+  return vsoxseg4ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg5ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
-  return vsoxseg5ei16_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
+  return vsoxseg5ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg6ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
-  return vsoxseg6ei16_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
+  return vsoxseg6ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg7ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
-  return vsoxseg7ei16_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
+  return vsoxseg7ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg8ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
-  return vsoxseg8ei16_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
+  return vsoxseg8ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
-  return vsoxseg2ei16_v_i16mf2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
+  return vsoxseg2ei16_v_i16mf2_m(mask, base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
-  return vsoxseg3ei16_v_i16mf2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
+  return vsoxseg3ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
-  return vsoxseg4ei16_v_i16mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
+  return vsoxseg4ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg5ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
-  return vsoxseg5ei16_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
+  return vsoxseg5ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg6ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
-  return vsoxseg6ei16_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
+  return vsoxseg6ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg7ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
-  return vsoxseg7ei16_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
+  return vsoxseg7ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg8ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
-  return vsoxseg8ei16_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
+  return vsoxseg8ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
-  return vsoxseg2ei16_v_i16m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
+  return vsoxseg2ei16_v_i16m1_m(mask, base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
-  return vsoxseg3ei16_v_i16m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
+  return vsoxseg3ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
-  return vsoxseg4ei16_v_i16m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
+  return vsoxseg4ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg5ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
-  return vsoxseg5ei16_v_i16m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
+  return vsoxseg5ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg6ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
-  return vsoxseg6ei16_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
+  return vsoxseg6ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg7ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
-  return vsoxseg7ei16_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
+  return vsoxseg7ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg8ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
-  return vsoxseg8ei16_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
+  return vsoxseg8ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei16_v_i16m2 (int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
-  return vsoxseg2ei16_v_i16m2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
+  return vsoxseg2ei16_v_i16m2_m(mask, base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16m2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei16_v_i16m2 (int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
-  return vsoxseg3ei16_v_i16m2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
+  return vsoxseg3ei16_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16m2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei16_v_i16m2 (int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
-  return vsoxseg4ei16_v_i16m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
+  return vsoxseg4ei16_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m4(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei16_v_i16m4 (int16_t *base, vuint16m4_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
-  return vsoxseg2ei16_v_i16m4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i16m4_m (vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
+  return vsoxseg2ei16_v_i16m4_m(mask, base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
-  return vsoxseg2ei32_v_i16mf4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
+  return vsoxseg2ei32_v_i16mf4_m(mask, base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
-  return vsoxseg3ei32_v_i16mf4(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
+  return vsoxseg3ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
-  return vsoxseg4ei32_v_i16mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
+  return vsoxseg4ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg5ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
-  return vsoxseg5ei32_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
+  return vsoxseg5ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg6ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
-  return vsoxseg6ei32_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
+  return vsoxseg6ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg7ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
-  return vsoxseg7ei32_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
+  return vsoxseg7ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg8ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
-  return vsoxseg8ei32_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
+  return vsoxseg8ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
-  return vsoxseg2ei32_v_i16mf2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
+  return vsoxseg2ei32_v_i16mf2_m(mask, base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
-  return vsoxseg3ei32_v_i16mf2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
+  return vsoxseg3ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
-  return vsoxseg4ei32_v_i16mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
+  return vsoxseg4ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg5ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
-  return vsoxseg5ei32_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
+  return vsoxseg5ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg6ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
-  return vsoxseg6ei32_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
+  return vsoxseg6ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg7ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
-  return vsoxseg7ei32_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
+  return vsoxseg7ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg8ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
-  return vsoxseg8ei32_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
+  return vsoxseg8ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
-  return vsoxseg2ei32_v_i16m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
+  return vsoxseg2ei32_v_i16m1_m(mask, base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
-  return vsoxseg3ei32_v_i16m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
+  return vsoxseg3ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
-  return vsoxseg4ei32_v_i16m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
+  return vsoxseg4ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg5ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
-  return vsoxseg5ei32_v_i16m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
+  return vsoxseg5ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg6ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
-  return vsoxseg6ei32_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
+  return vsoxseg6ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg7ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
-  return vsoxseg7ei32_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
+  return vsoxseg7ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg8ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
-  return vsoxseg8ei32_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
+  return vsoxseg8ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_i16m2 (int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
-  return vsoxseg2ei32_v_i16m2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
+  return vsoxseg2ei32_v_i16m2_m(mask, base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16m2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_i16m2 (int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
-  return vsoxseg3ei32_v_i16m2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
+  return vsoxseg3ei32_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16m2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei32_v_i16m2 (int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
-  return vsoxseg4ei32_v_i16m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
+  return vsoxseg4ei32_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m4(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_i16m4 (int16_t *base, vuint32m8_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
-  return vsoxseg2ei32_v_i16m4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_i16m4_m (vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
+  return vsoxseg2ei32_v_i16m4_m(mask, base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
-  return vsoxseg2ei64_v_i16mf4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
+  return vsoxseg2ei64_v_i16mf4_m(mask, base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
-  return vsoxseg3ei64_v_i16mf4(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
+  return vsoxseg3ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
-  return vsoxseg4ei64_v_i16mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
+  return vsoxseg4ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg5ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
-  return vsoxseg5ei64_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
+  return vsoxseg5ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg6ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
-  return vsoxseg6ei64_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
+  return vsoxseg6ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg7ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
-  return vsoxseg7ei64_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
+  return vsoxseg7ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg8ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
-  return vsoxseg8ei64_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
+  return vsoxseg8ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16mf2_m(
 //
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsoxseg2ei64_v_i16mf2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { + return vsoxseg2ei64_v_i16mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsoxseg3ei64_v_i16mf2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { + return vsoxseg3ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsoxseg4ei64_v_i16mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { + return vsoxseg4ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsoxseg5ei64_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, 
vint16mf2_t v3, vint16mf2_t v4, size_t vl) { + return vsoxseg5ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsoxseg6ei64_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { + return vsoxseg6ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsoxseg7ei64_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { + return vsoxseg7ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsoxseg8ei64_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { + return 
vsoxseg8ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16m1( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsoxseg2ei64_v_i16m1(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { + return vsoxseg2ei64_v_i16m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16m1( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsoxseg3ei64_v_i16m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { + return vsoxseg3ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16m1( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsoxseg4ei64_v_i16m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { + return vsoxseg4ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16m1( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsoxseg5ei64_v_i16m1(base, bindex, v0, v1, v2, v3, 
v4, vl); +void test_vsoxseg5ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { + return vsoxseg5ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16m1( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsoxseg6ei64_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { + return vsoxseg6ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16m1( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsoxseg7ei64_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { + return vsoxseg7ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16m1( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsoxseg8ei64_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, 
vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { + return vsoxseg8ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16m2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i16m2 (int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsoxseg2ei64_v_i16m2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { + return vsoxseg2ei64_v_i16m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16m2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i16m2 (int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsoxseg3ei64_v_i16m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { + return vsoxseg3ei64_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16m2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i16m2 (int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsoxseg4ei64_v_i16m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { + return vsoxseg4ei64_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsoxseg2ei8_v_i32mf2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_i32mf2_m 
(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { + return vsoxseg2ei8_v_i32mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsoxseg3ei8_v_i32mf2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { + return vsoxseg3ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsoxseg4ei8_v_i32mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { + return vsoxseg4ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsoxseg5ei8_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { + return vsoxseg5ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsoxseg6ei8_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { + return vsoxseg6ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsoxseg7ei8_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { + return vsoxseg7ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsoxseg8ei8_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { + return vsoxseg8ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m1( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, 
vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsoxseg2ei8_v_i32m1(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { + return vsoxseg2ei8_v_i32m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32m1( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsoxseg3ei8_v_i32m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { + return vsoxseg3ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32m1( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsoxseg4ei8_v_i32m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { + return vsoxseg4ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i32m1( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsoxseg5ei8_v_i32m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { + return vsoxseg5ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i32m1( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsoxseg6ei8_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { + return vsoxseg6ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i32m1( +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsoxseg7ei8_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { + return vsoxseg7ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i32m1( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsoxseg8ei8_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { + return vsoxseg8ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxseg2ei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsoxseg2ei8_v_i32m2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { + return vsoxseg2ei8_v_i32m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32m2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsoxseg3ei8_v_i32m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { + return vsoxseg3ei8_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32m2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsoxseg4ei8_v_i32m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { + return vsoxseg4ei8_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m4( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsoxseg2ei8_v_i32m4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { + return vsoxseg2ei8_v_i32m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i32mf2 (int32_t 
*base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsoxseg2ei16_v_i32mf2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { + return vsoxseg2ei16_v_i32mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsoxseg3ei16_v_i32mf2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { + return vsoxseg3ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsoxseg4ei16_v_i32mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { + return vsoxseg4ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsoxseg5ei16_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { + return vsoxseg5ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsoxseg6ei16_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { + return vsoxseg6ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsoxseg7ei16_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { + return vsoxseg7ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsoxseg8ei16_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { + return vsoxseg8ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m1( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsoxseg2ei16_v_i32m1(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { + return vsoxseg2ei16_v_i32m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32m1( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsoxseg3ei16_v_i32m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { + return vsoxseg3ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32m1( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsoxseg4ei16_v_i32m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { + return vsoxseg4ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i32m1( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsoxseg5ei16_v_i32m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { + return vsoxseg5ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i32m1( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i32m1_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsoxseg6ei16_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { + return vsoxseg6ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i32m1( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsoxseg7ei16_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { + return vsoxseg7ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i32m1( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsoxseg8ei16_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { + return vsoxseg8ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i16.i64( [[V0:%.*]], 
[[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsoxseg2ei16_v_i32m2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { + return vsoxseg2ei16_v_i32m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32m2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsoxseg3ei16_v_i32m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { + return vsoxseg3ei16_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32m2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsoxseg4ei16_v_i32m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { + return vsoxseg4ei16_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m4( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsoxseg2ei16_v_i32m4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { + return vsoxseg2ei16_v_i32m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i32.i64( [[V0:%.*]], 
[[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsoxseg2ei32_v_i32mf2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { + return vsoxseg2ei32_v_i32mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsoxseg3ei32_v_i32mf2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { + return vsoxseg3ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsoxseg4ei32_v_i32mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { + return vsoxseg4ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsoxseg5ei32_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { + return vsoxseg5ei32_v_i32mf2_m(mask, base, bindex, v0, 
v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsoxseg6ei32_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { + return vsoxseg6ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsoxseg7ei32_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { + return vsoxseg7ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsoxseg8ei32_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { + return vsoxseg8ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// 
CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m1( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsoxseg2ei32_v_i32m1(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { + return vsoxseg2ei32_v_i32m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32m1( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsoxseg3ei32_v_i32m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { + return vsoxseg3ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32m1( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsoxseg4ei32_v_i32m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { + return vsoxseg4ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i32m1( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsoxseg5ei32_v_i32m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t 
bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { + return vsoxseg5ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i32m1( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsoxseg6ei32_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { + return vsoxseg6ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i32m1( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsoxseg7ei32_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { + return vsoxseg7ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i32m1( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsoxseg8ei32_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { + return 
vsoxseg8ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsoxseg2ei32_v_i32m2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { + return vsoxseg2ei32_v_i32m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32m2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsoxseg3ei32_v_i32m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { + return vsoxseg3ei32_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32m2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsoxseg4ei32_v_i32m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { + return vsoxseg4ei32_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m4( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsoxseg2ei32_v_i32m4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t v0, vint32m4_t v1, size_t 
vl) { + return vsoxseg2ei32_v_i32m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsoxseg2ei64_v_i32mf2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { + return vsoxseg2ei64_v_i32mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsoxseg3ei64_v_i32mf2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { + return vsoxseg3ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsoxseg4ei64_v_i32mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { + return vsoxseg4ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return 
vsoxseg5ei64_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { + return vsoxseg5ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsoxseg6ei64_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { + return vsoxseg6ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsoxseg7ei64_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { + return vsoxseg7ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsoxseg8ei64_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i32mf2_m 
(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { + return vsoxseg8ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m1( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsoxseg2ei64_v_i32m1(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { + return vsoxseg2ei64_v_i32m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32m1( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsoxseg3ei64_v_i32m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { + return vsoxseg3ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32m1( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsoxseg4ei64_v_i32m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { + return vsoxseg4ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i32m1( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxseg5ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsoxseg5ei64_v_i32m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { + return vsoxseg5ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i32m1( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsoxseg6ei64_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { + return vsoxseg6ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i32m1( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsoxseg7ei64_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { + return vsoxseg7ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i32m1( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return 
vsoxseg8ei64_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { + return vsoxseg8ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsoxseg2ei64_v_i32m2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { + return vsoxseg2ei64_v_i32m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32m2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsoxseg3ei64_v_i32m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { + return vsoxseg3ei64_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32m2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsoxseg4ei64_v_i32m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { + return vsoxseg4ei64_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m4( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret void // -void test_vsoxseg2ei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsoxseg2ei64_v_i32m4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { + return vsoxseg2ei64_v_i32m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m1( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsoxseg2ei8_v_i64m1(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { + return vsoxseg2ei8_v_i64m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i64m1( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsoxseg3ei8_v_i64m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { + return vsoxseg3ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i64m1( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsoxseg4ei8_v_i64m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { + return vsoxseg4ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i64m1( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsoxseg5ei8_v_i64m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { + return vsoxseg5ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i64m1( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsoxseg6ei8_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { + return vsoxseg6ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i64m1( +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsoxseg7ei8_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { + return vsoxseg7ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i64m1( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t 
vl) { - return vsoxseg8ei8_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { + return vsoxseg8ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsoxseg2ei8_v_i64m2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { + return vsoxseg2ei8_v_i64m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i64m2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsoxseg3ei8_v_i64m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { + return vsoxseg3ei8_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i64m2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsoxseg4ei8_v_i64m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { + return vsoxseg4ei8_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m4( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxseg2ei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsoxseg2ei8_v_i64m4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { + return vsoxseg2ei8_v_i64m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m1( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsoxseg2ei16_v_i64m1(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { + return vsoxseg2ei16_v_i64m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i64m1( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsoxseg3ei16_v_i64m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { + return vsoxseg3ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i64m1( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsoxseg4ei16_v_i64m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { + return vsoxseg4ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i64m1( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsoxseg5ei16_v_i64m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { + return vsoxseg5ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i64m1( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsoxseg6ei16_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { + return vsoxseg6ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i64m1( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsoxseg7ei16_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { + return vsoxseg7ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i64m1( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, 
vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vsoxseg8ei16_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
+ return vsoxseg8ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vsoxseg2ei16_v_i64m2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
+ return vsoxseg2ei16_v_i64m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i64m2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vsoxseg3ei16_v_i64m2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
+ return vsoxseg3ei16_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i64m2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vsoxseg4ei16_v_i64m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
+ return vsoxseg4ei16_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m4(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vsoxseg2ei16_v_i64m4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
+ return vsoxseg2ei16_v_i64m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m1(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vsoxseg2ei32_v_i64m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
+ return vsoxseg2ei32_v_i64m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i64m1(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return vsoxseg3ei32_v_i64m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
+ return vsoxseg3ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i64m1(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return vsoxseg4ei32_v_i64m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
+ return vsoxseg4ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i64m1(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return vsoxseg5ei32_v_i64m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
+ return vsoxseg5ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i64m1(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return vsoxseg6ei32_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
+ return vsoxseg6ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i64m1(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return vsoxseg7ei32_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
+ return vsoxseg7ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i64m1(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vsoxseg8ei32_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
+ return vsoxseg8ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vsoxseg2ei32_v_i64m2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
+ return vsoxseg2ei32_v_i64m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i64m2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vsoxseg3ei32_v_i64m2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
+ return vsoxseg3ei32_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i64m2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vsoxseg4ei32_v_i64m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
+ return vsoxseg4ei32_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m4(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vsoxseg2ei32_v_i64m4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
+ return vsoxseg2ei32_v_i64m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m1(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vsoxseg2ei64_v_i64m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
+ return vsoxseg2ei64_v_i64m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i64m1(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return vsoxseg3ei64_v_i64m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
+ return vsoxseg3ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i64m1(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return vsoxseg4ei64_v_i64m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
+ return vsoxseg4ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i64m1(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return vsoxseg5ei64_v_i64m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
+ return vsoxseg5ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i64m1(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return vsoxseg6ei64_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
+ return vsoxseg6ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i64m1(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return vsoxseg7ei64_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
+ return vsoxseg7ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i64m1(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vsoxseg8ei64_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
+ return vsoxseg8ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vsoxseg2ei64_v_i64m2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
+ return vsoxseg2ei64_v_i64m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i64m2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vsoxseg3ei64_v_i64m2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
+ return vsoxseg3ei64_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i64m2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vsoxseg4ei64_v_i64m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
+ return vsoxseg4ei64_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m4(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vsoxseg2ei64_v_i64m4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
+ return vsoxseg2ei64_v_i64m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return vsoxseg2ei8_v_u8mf8(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
+ return vsoxseg2ei8_v_u8mf8_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
- return vsoxseg3ei8_v_u8mf8(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
+ return vsoxseg3ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return vsoxseg4ei8_v_u8mf8(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
+ return vsoxseg4ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
- return vsoxseg5ei8_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
+ return vsoxseg5ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return vsoxseg6ei8_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
+ return vsoxseg6ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
- return vsoxseg7ei8_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
+ return vsoxseg7ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return vsoxseg8ei8_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
+ return vsoxseg8ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return vsoxseg2ei8_v_u8mf4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
+ return vsoxseg2ei8_v_u8mf4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return vsoxseg3ei8_v_u8mf4(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
+ return vsoxseg3ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return vsoxseg4ei8_v_u8mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
+ return vsoxseg4ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
- return vsoxseg5ei8_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
+ return vsoxseg5ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return vsoxseg6ei8_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
+ return vsoxseg6ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
- return vsoxseg7ei8_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
+ return vsoxseg7ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return vsoxseg8ei8_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
+ return vsoxseg8ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return vsoxseg2ei8_v_u8mf2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
+ return vsoxseg2ei8_v_u8mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return vsoxseg3ei8_v_u8mf2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
+ return vsoxseg3ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
- return vsoxseg4ei8_v_u8mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
+ return vsoxseg4ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
- return vsoxseg5ei8_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
+ return vsoxseg5ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return vsoxseg6ei8_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
+ return vsoxseg6ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
- return vsoxseg7ei8_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
+ return vsoxseg7ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
- return vsoxseg8ei8_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
+ return vsoxseg8ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m1(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return vsoxseg2ei8_v_u8m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
+ return vsoxseg2ei8_v_u8m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8m1(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
- return vsoxseg3ei8_v_u8m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
+ return vsoxseg3ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8m1(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
- return vsoxseg4ei8_v_u8m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
+ return vsoxseg4ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8m1(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
- return vsoxseg5ei8_v_u8m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
+ return vsoxseg5ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8m1(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
- return vsoxseg6ei8_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
+ return vsoxseg6ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8m1(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
- return vsoxseg7ei8_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
+ return vsoxseg7ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8m1(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
- return vsoxseg8ei8_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
+ return vsoxseg8ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_u8m2 (uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
- return vsoxseg2ei8_v_u8m2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
+ return vsoxseg2ei8_v_u8m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8m2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_u8m2 (uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
- return vsoxseg3ei8_v_u8m2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
+ return vsoxseg3ei8_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8m2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_u8m2 (uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
- return vsoxseg4ei8_v_u8m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
+ return vsoxseg4ei8_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m4(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_u8m4 (uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
- return vsoxseg2ei8_v_u8m4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
+ return vsoxseg2ei8_v_u8m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return vsoxseg2ei16_v_u8mf8(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
+ return vsoxseg2ei16_v_u8mf8_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
- return vsoxseg3ei16_v_u8mf8(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
+ return vsoxseg3ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return vsoxseg4ei16_v_u8mf8(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
+ return vsoxseg4ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
- return vsoxseg5ei16_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
+ return vsoxseg5ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return vsoxseg6ei16_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
+ return vsoxseg6ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
- return vsoxseg7ei16_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
+ return vsoxseg7ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return vsoxseg8ei16_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
+ return vsoxseg8ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return vsoxseg2ei16_v_u8mf4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
+ return vsoxseg2ei16_v_u8mf4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return vsoxseg3ei16_v_u8mf4(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
+ return vsoxseg3ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return vsoxseg4ei16_v_u8mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
+ return vsoxseg4ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
- return vsoxseg5ei16_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
+ return vsoxseg5ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return vsoxseg6ei16_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
+ return vsoxseg6ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
- return vsoxseg7ei16_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
+ return vsoxseg7ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return vsoxseg8ei16_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
+ return vsoxseg8ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return vsoxseg2ei16_v_u8mf2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
+ return vsoxseg2ei16_v_u8mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return vsoxseg3ei16_v_u8mf2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
+ return vsoxseg3ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
- return vsoxseg4ei16_v_u8mf2(base, bindex, v0, v1,
v2, v3, vl); +void test_vsoxseg4ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { + return vsoxseg4ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf2( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsoxseg5ei16_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { + return vsoxseg5ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf2( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsoxseg6ei16_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { + return vsoxseg6ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf2( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsoxseg7ei16_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { + return vsoxseg7ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// 
CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf2( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsoxseg8ei16_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { + return vsoxseg8ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m1( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsoxseg2ei16_v_u8m1(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { + return vsoxseg2ei16_v_u8m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8m1( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsoxseg3ei16_v_u8m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { + return vsoxseg3ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8m1( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, 
vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsoxseg4ei16_v_u8m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { + return vsoxseg4ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8m1( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsoxseg5ei16_v_u8m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { + return vsoxseg5ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8m1( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsoxseg6ei16_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { + return vsoxseg6ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8m1( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsoxseg7ei16_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { + return vsoxseg7ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, 
v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8m1( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsoxseg8ei16_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { + return vsoxseg8ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u8m2 (uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return vsoxseg2ei16_v_u8m2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { + return vsoxseg2ei16_v_u8m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8m2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u8m2 (uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return vsoxseg3ei16_v_u8m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { + return vsoxseg3ei16_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8m2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u8m2 (uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, 
vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return vsoxseg4ei16_v_u8m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { + return vsoxseg4ei16_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m4( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u8m4 (uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { - return vsoxseg2ei16_v_u8m4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { + return vsoxseg2ei16_v_u8m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf8( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsoxseg2ei32_v_u8mf8(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { + return vsoxseg2ei32_v_u8mf8_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf8( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsoxseg3ei32_v_u8mf8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { + return vsoxseg3ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf8( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, 
vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsoxseg4ei32_v_u8mf8(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { + return vsoxseg4ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf8( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsoxseg5ei32_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { + return vsoxseg5ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf8( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsoxseg6ei32_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { + return vsoxseg6ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8mf8( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsoxseg7ei32_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) 
{ + return vsoxseg7ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf8( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsoxseg8ei32_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { + return vsoxseg8ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf4( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsoxseg2ei32_v_u8mf4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { + return vsoxseg2ei32_v_u8mf4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf4( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsoxseg3ei32_v_u8mf4(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { + return vsoxseg3ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf4( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsoxseg4ei32_v_u8mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { + return vsoxseg4ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf4( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsoxseg5ei32_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { + return vsoxseg5ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf4( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsoxseg6ei32_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { + return vsoxseg6ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8mf4( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsoxseg7ei32_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, 
vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { + return vsoxseg7ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf4( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsoxseg8ei32_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { + return vsoxseg8ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsoxseg2ei32_v_u8mf2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { + return vsoxseg2ei32_v_u8mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vsoxseg3ei32_v_u8mf2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { + return vsoxseg3ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsoxseg4ei32_v_u8mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { + return vsoxseg4ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf2( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsoxseg5ei32_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { + return vsoxseg5ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf2( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsoxseg6ei32_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { + return vsoxseg6ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8mf2( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - 
return vsoxseg7ei32_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { + return vsoxseg7ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf2( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsoxseg8ei32_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { + return vsoxseg8ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8m1( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsoxseg2ei32_v_u8m1(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { + return vsoxseg2ei32_v_u8m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8m1( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsoxseg3ei32_v_u8m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { + return vsoxseg3ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8m1( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsoxseg4ei32_v_u8m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { + return vsoxseg4ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8m1( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsoxseg5ei32_v_u8m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { + return vsoxseg5ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8m1( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsoxseg6ei32_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { + return vsoxseg6ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8m1( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t 
v5, vuint8m1_t v6, size_t vl) { - return vsoxseg7ei32_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { + return vsoxseg7ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8m1( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsoxseg8ei32_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { + return vsoxseg8ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8m2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u8m2 (uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return vsoxseg2ei32_v_u8m2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { + return vsoxseg2ei32_v_u8m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8m2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u8m2 (uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return vsoxseg3ei32_v_u8m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { + return vsoxseg3ei32_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8m2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_u8m2 (uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
- return vsoxseg4ei32_v_u8m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
+ return vsoxseg4ei32_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return vsoxseg2ei64_v_u8mf8(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
+ return vsoxseg2ei64_v_u8mf8_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
- return vsoxseg3ei64_v_u8mf8(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
+ return vsoxseg3ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return vsoxseg4ei64_v_u8mf8(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
+ return vsoxseg4ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
- return vsoxseg5ei64_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
+ return vsoxseg5ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return vsoxseg6ei64_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
+ return vsoxseg6ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
- return vsoxseg7ei64_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
+ return vsoxseg7ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return vsoxseg8ei64_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
+ return vsoxseg8ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return vsoxseg2ei64_v_u8mf4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
+ return vsoxseg2ei64_v_u8mf4_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return vsoxseg3ei64_v_u8mf4(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
+ return vsoxseg3ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return vsoxseg4ei64_v_u8mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
+ return vsoxseg4ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
- return vsoxseg5ei64_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
+ return vsoxseg5ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return vsoxseg6ei64_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
+ return vsoxseg6ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
- return vsoxseg7ei64_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
+ return vsoxseg7ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return vsoxseg8ei64_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
+ return vsoxseg8ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return vsoxseg2ei64_v_u8mf2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
+ return vsoxseg2ei64_v_u8mf2_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return vsoxseg3ei64_v_u8mf2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
+ return vsoxseg3ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
- return vsoxseg4ei64_v_u8mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
+ return vsoxseg4ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
- return vsoxseg5ei64_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
+ return vsoxseg5ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return vsoxseg6ei64_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
+ return vsoxseg6ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
- return vsoxseg7ei64_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
+ return vsoxseg7ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
- return vsoxseg8ei64_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
+ return vsoxseg8ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8m1(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return vsoxseg2ei64_v_u8m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
+ return vsoxseg2ei64_v_u8m1_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8m1(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
- return vsoxseg3ei64_v_u8m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
+ return vsoxseg3ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8m1(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
- return vsoxseg4ei64_v_u8m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
+ return vsoxseg4ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8m1(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
- return vsoxseg5ei64_v_u8m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
+ return vsoxseg5ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8m1(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
- return vsoxseg6ei64_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
+ return vsoxseg6ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8m1(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
- return vsoxseg7ei64_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
+ return vsoxseg7ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8m1(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
- return vsoxseg8ei64_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
+ return vsoxseg8ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsoxseg2ei8_v_u16mf4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
+ return vsoxseg2ei8_v_u16mf4_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsoxseg3ei8_v_u16mf4(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
+ return vsoxseg3ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsoxseg4ei8_v_u16mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
+ return vsoxseg4ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsoxseg5ei8_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
+ return vsoxseg5ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsoxseg6ei8_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
+ return vsoxseg6ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsoxseg7ei8_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
+ return vsoxseg7ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsoxseg8ei8_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
+ return vsoxseg8ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsoxseg2ei8_v_u16mf2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
+ return vsoxseg2ei8_v_u16mf2_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsoxseg3ei8_v_u16mf2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
+ return vsoxseg3ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsoxseg4ei8_v_u16mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
+ return vsoxseg4ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsoxseg5ei8_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
+ return vsoxseg5ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsoxseg6ei8_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
+ return vsoxseg6ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsoxseg7ei8_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
+ return vsoxseg7ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsoxseg8ei8_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
+ return vsoxseg8ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsoxseg2ei8_v_u16m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
+ return vsoxseg2ei8_v_u16m1_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsoxseg3ei8_v_u16m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
+ return vsoxseg3ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsoxseg4ei8_v_u16m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
+ return vsoxseg4ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsoxseg5ei8_v_u16m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
+ return vsoxseg5ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsoxseg6ei8_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
+ return vsoxseg6ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsoxseg7ei8_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
+ return vsoxseg7ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsoxseg8ei8_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
+ return vsoxseg8ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_u16m2 (uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsoxseg2ei8_v_u16m2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
+ return vsoxseg2ei8_v_u16m2_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16m2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei8_v_u16m2 (uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsoxseg3ei8_v_u16m2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
+ return vsoxseg3ei8_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16m2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_u16m2 (uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsoxseg4ei8_v_u16m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
+ return vsoxseg4ei8_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m4(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_u16m4 (uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return vsoxseg2ei8_v_u16m4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
+ return vsoxseg2ei8_v_u16m4_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsoxseg2ei16_v_u16mf4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
+ return vsoxseg2ei16_v_u16mf4_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsoxseg3ei16_v_u16mf4(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
+ return vsoxseg3ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsoxseg4ei16_v_u16mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
+ return vsoxseg4ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsoxseg5ei16_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
+ return vsoxseg5ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsoxseg6ei16_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
+ return vsoxseg6ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsoxseg7ei16_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
+ return vsoxseg7ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsoxseg8ei16_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
+ return vsoxseg8ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsoxseg2ei16_v_u16mf2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
+ return vsoxseg2ei16_v_u16mf2_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsoxseg3ei16_v_u16mf2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
+ return vsoxseg3ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsoxseg4ei16_v_u16mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
+ return vsoxseg4ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsoxseg5ei16_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
+ return vsoxseg5ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsoxseg6ei16_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
+ return vsoxseg6ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsoxseg7ei16_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
+ return vsoxseg7ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsoxseg8ei16_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
+ return vsoxseg8ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsoxseg2ei16_v_u16m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
+ return vsoxseg2ei16_v_u16m1_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsoxseg3ei16_v_u16m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
+ return vsoxseg3ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsoxseg4ei16_v_u16m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
+ return vsoxseg4ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsoxseg5ei16_v_u16m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
+ return vsoxseg5ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsoxseg6ei16_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
+ return
vsoxseg6ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16m1( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsoxseg7ei16_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { + return vsoxseg7ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16m1( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsoxseg8ei16_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { + return vsoxseg8ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u16m2 (uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsoxseg2ei16_v_u16m2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { + return vsoxseg2ei16_v_u16m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16m2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u16m2 (uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsoxseg3ei16_v_u16m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { + return vsoxseg3ei16_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16m2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u16m2 (uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsoxseg4ei16_v_u16m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { + return vsoxseg4ei16_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m4( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u16m4 (uint16_t *base, vuint16m4_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return vsoxseg2ei16_v_u16m4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { + return vsoxseg2ei16_v_u16m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16mf4( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return vsoxseg2ei32_v_u16mf4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { + return vsoxseg2ei32_v_u16mf4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16mf4( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i32.i64( 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vsoxseg3ei32_v_u16mf4(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { + return vsoxseg3ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16mf4( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return vsoxseg4ei32_v_u16mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { + return vsoxseg4ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16mf4( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return vsoxseg5ei32_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { + return vsoxseg5ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16mf4( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return 
vsoxseg6ei32_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { + return vsoxseg6ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16mf4( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vsoxseg7ei32_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { + return vsoxseg7ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16mf4( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vsoxseg8ei32_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { + return vsoxseg8ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return vsoxseg2ei32_v_u16mf2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, 
vuint16mf2_t v1, size_t vl) { + return vsoxseg2ei32_v_u16mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return vsoxseg3ei32_v_u16mf2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { + return vsoxseg3ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return vsoxseg4ei32_v_u16mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { + return vsoxseg4ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vsoxseg5ei32_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { + return vsoxseg5ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vsoxseg6ei32_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { + return vsoxseg6ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vsoxseg7ei32_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { + return vsoxseg7ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsoxseg8ei32_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { + return vsoxseg8ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m1( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // 
-void test_vsoxseg2ei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsoxseg2ei32_v_u16m1(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { + return vsoxseg2ei32_v_u16m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16m1( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsoxseg3ei32_v_u16m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { + return vsoxseg3ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16m1( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsoxseg4ei32_v_u16m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { + return vsoxseg4ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16m1( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsoxseg5ei32_v_u16m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { + return vsoxseg5ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16m1( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], 
[[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsoxseg6ei32_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { + return vsoxseg6ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16m1( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsoxseg7ei32_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { + return vsoxseg7ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16m1( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsoxseg8ei32_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { + return vsoxseg8ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u16m2 (uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsoxseg2ei32_v_u16m2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { + return vsoxseg2ei32_v_u16m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16m2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u16m2 (uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsoxseg3ei32_v_u16m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { + return vsoxseg3ei32_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16m2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u16m2 (uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsoxseg4ei32_v_u16m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { + return vsoxseg4ei32_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m4( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u16m4 (uint16_t *base, vuint32m8_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return vsoxseg2ei32_v_u16m4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { + return vsoxseg2ei32_v_u16m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16mf4( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return vsoxseg2ei64_v_u16mf4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { + return vsoxseg2ei64_v_u16mf4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16mf4( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vsoxseg3ei64_v_u16mf4(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { + return vsoxseg3ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16mf4( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return vsoxseg4ei64_v_u16mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { + return vsoxseg4ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16mf4( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return vsoxseg5ei64_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { + return vsoxseg5ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// 
CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16mf4( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return vsoxseg6ei64_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { + return vsoxseg6ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16mf4( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vsoxseg7ei64_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { + return vsoxseg7ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16mf4( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vsoxseg8ei64_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { + return vsoxseg8ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } 
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return vsoxseg2ei64_v_u16mf2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { + return vsoxseg2ei64_v_u16mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return vsoxseg3ei64_v_u16mf2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { + return vsoxseg3ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return vsoxseg4ei64_v_u16mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { + return vsoxseg4ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vsoxseg5ei64_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, vl); 
+void test_vsoxseg5ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { + return vsoxseg5ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vsoxseg6ei64_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { + return vsoxseg6ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vsoxseg7ei64_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { + return vsoxseg7ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsoxseg8ei64_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_u16mf2_m (vbool32_t mask, 
uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { + return vsoxseg8ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16m1( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsoxseg2ei64_v_u16m1(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { + return vsoxseg2ei64_v_u16m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16m1( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsoxseg3ei64_v_u16m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { + return vsoxseg3ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16m1( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsoxseg4ei64_v_u16m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { + return vsoxseg4ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16m1( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // 
-void test_vsoxseg5ei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsoxseg5ei64_v_u16m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
+ return vsoxseg5ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsoxseg6ei64_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
+ return vsoxseg6ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsoxseg7ei64_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
+ return vsoxseg7ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsoxseg8ei64_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
+ return vsoxseg8ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16m2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_u16m2 (uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsoxseg2ei64_v_u16m2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
+ return vsoxseg2ei64_v_u16m2_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16m2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei64_v_u16m2 (uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsoxseg3ei64_v_u16m2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
+ return vsoxseg3ei64_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16m2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_u16m2 (uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsoxseg4ei64_v_u16m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
+ return vsoxseg4ei64_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsoxseg2ei8_v_u32mf2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
+ return vsoxseg2ei8_v_u32mf2_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsoxseg3ei8_v_u32mf2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
+ return vsoxseg3ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsoxseg4ei8_v_u32mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
+ return vsoxseg4ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsoxseg5ei8_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
+ return vsoxseg5ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsoxseg6ei8_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
+ return vsoxseg6ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsoxseg7ei8_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
+ return vsoxseg7ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsoxseg8ei8_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
+ return vsoxseg8ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsoxseg2ei8_v_u32m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
+ return vsoxseg2ei8_v_u32m1_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsoxseg3ei8_v_u32m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
+ return vsoxseg3ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vsoxseg4ei8_v_u32m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
+ return vsoxseg4ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsoxseg5ei8_v_u32m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
+ return vsoxseg5ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsoxseg6ei8_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
+ return vsoxseg6ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsoxseg7ei8_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
+ return vsoxseg7ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsoxseg8ei8_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
+ return vsoxseg8ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsoxseg2ei8_v_u32m2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
+ return vsoxseg2ei8_v_u32m2_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32m2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vsoxseg3ei8_v_u32m2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
+ return vsoxseg3ei8_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32m2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vsoxseg4ei8_v_u32m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
+ return vsoxseg4ei8_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m4(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return vsoxseg2ei8_v_u32m4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
+ return vsoxseg2ei8_v_u32m4_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsoxseg2ei16_v_u32mf2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
+ return vsoxseg2ei16_v_u32mf2_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsoxseg3ei16_v_u32mf2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
+ return vsoxseg3ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsoxseg4ei16_v_u32mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
+ return vsoxseg4ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsoxseg5ei16_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
+ return vsoxseg5ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsoxseg6ei16_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
+ return vsoxseg6ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsoxseg7ei16_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
+ return vsoxseg7ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsoxseg8ei16_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
+ return vsoxseg8ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsoxseg2ei16_v_u32m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
+ return vsoxseg2ei16_v_u32m1_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsoxseg3ei16_v_u32m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
+ return vsoxseg3ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vsoxseg4ei16_v_u32m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
+ return vsoxseg4ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsoxseg5ei16_v_u32m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
+ return vsoxseg5ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsoxseg6ei16_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
+ return vsoxseg6ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsoxseg7ei16_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
+ return vsoxseg7ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsoxseg8ei16_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
+ return vsoxseg8ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsoxseg2ei16_v_u32m2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
+ return vsoxseg2ei16_v_u32m2_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32m2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vsoxseg3ei16_v_u32m2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
+ return vsoxseg3ei16_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32m2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vsoxseg4ei16_v_u32m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
+ return vsoxseg4ei16_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m4(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return vsoxseg2ei16_v_u32m4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
+ return vsoxseg2ei16_v_u32m4_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsoxseg2ei32_v_u32mf2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
+ return vsoxseg2ei32_v_u32mf2_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsoxseg3ei32_v_u32mf2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
+ return vsoxseg3ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsoxseg4ei32_v_u32mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
+ return vsoxseg4ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsoxseg5ei32_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
+ return vsoxseg5ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsoxseg6ei32_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
+ return vsoxseg6ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsoxseg7ei32_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
+ return vsoxseg7ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsoxseg8ei32_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
+ return vsoxseg8ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsoxseg2ei32_v_u32m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
+ return vsoxseg2ei32_v_u32m1_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsoxseg3ei32_v_u32m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
+ return vsoxseg3ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vsoxseg4ei32_v_u32m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
+ return vsoxseg4ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsoxseg5ei32_v_u32m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
+ return vsoxseg5ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsoxseg6ei32_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
+ return vsoxseg6ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsoxseg7ei32_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
+ return vsoxseg7ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsoxseg8ei32_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
+ return vsoxseg8ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsoxseg2ei32_v_u32m2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
+ return vsoxseg2ei32_v_u32m2_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32m2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vsoxseg3ei32_v_u32m2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
+ return vsoxseg3ei32_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32m2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vsoxseg4ei32_v_u32m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
+ return vsoxseg4ei32_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m4(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return vsoxseg2ei32_v_u32m4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
+ return vsoxseg2ei32_v_u32m4_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsoxseg2ei64_v_u32mf2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
+ return vsoxseg2ei64_v_u32mf2_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsoxseg3ei64_v_u32mf2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
+ return vsoxseg3ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsoxseg4ei64_v_u32mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
+ return vsoxseg4ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsoxseg5ei64_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
+ return vsoxseg5ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsoxseg6ei64_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
+ return vsoxseg6ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsoxseg7ei64_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
+ return vsoxseg7ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsoxseg8ei64_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
+ return vsoxseg8ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsoxseg2ei64_v_u32m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
+ return vsoxseg2ei64_v_u32m1_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsoxseg3ei64_v_u32m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
+ return vsoxseg3ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_u32m1 (uint32_t 
*base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsoxseg4ei64_v_u32m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { + return vsoxseg4ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u32m1( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsoxseg5ei64_v_u32m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { + return vsoxseg5ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u32m1( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsoxseg6ei64_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { + return vsoxseg6ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u32m1( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsoxseg7ei64_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, 
vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { + return vsoxseg7ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u32m1( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsoxseg8ei64_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { + return vsoxseg8ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsoxseg2ei64_v_u32m2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { + return vsoxseg2ei64_v_u32m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32m2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsoxseg3ei64_v_u32m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { + return vsoxseg3ei64_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32m2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsoxseg4ei64_v_u32m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { + return vsoxseg4ei64_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m4( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsoxseg2ei64_v_u32m4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { + return vsoxseg2ei64_v_u32m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m1( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsoxseg2ei8_v_u64m1(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { + return vsoxseg2ei8_v_u64m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u64m1( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsoxseg3ei8_v_u64m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { + return vsoxseg3ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u64m1( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsoxseg4ei8_v_u64m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { + return vsoxseg4ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u64m1( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsoxseg5ei8_v_u64m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { + return vsoxseg5ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u64m1( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsoxseg6ei8_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { + return vsoxseg6ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u64m1( +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsoxseg7ei8_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); 
+void test_vsoxseg7ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { + return vsoxseg7ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u64m1( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsoxseg8ei8_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { + return vsoxseg8ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsoxseg2ei8_v_u64m2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { + return vsoxseg2ei8_v_u64m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u64m2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsoxseg3ei8_v_u64m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { + return vsoxseg3ei8_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u64m2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsoxseg4ei8_v_u64m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { + return vsoxseg4ei8_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m4( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsoxseg2ei8_v_u64m4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { + return vsoxseg2ei8_v_u64m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m1( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsoxseg2ei16_v_u64m1(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { + return vsoxseg2ei16_v_u64m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u64m1( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsoxseg3ei16_v_u64m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { + return vsoxseg3ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u64m1( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsoxseg4ei16_v_u64m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { + return vsoxseg4ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u64m1( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsoxseg5ei16_v_u64m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { + return vsoxseg5ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u64m1( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsoxseg6ei16_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { + return vsoxseg6ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u64m1( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, 
vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsoxseg7ei16_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { + return vsoxseg7ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u64m1( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsoxseg8ei16_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { + return vsoxseg8ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsoxseg2ei16_v_u64m2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { + return vsoxseg2ei16_v_u64m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u64m2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsoxseg3ei16_v_u64m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { + return vsoxseg3ei16_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u64m2( +// CHECK-RV64-LABEL: 
@test_vsoxseg4ei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsoxseg4ei16_v_u64m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { + return vsoxseg4ei16_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m4( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsoxseg2ei16_v_u64m4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { + return vsoxseg2ei16_v_u64m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m1( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsoxseg2ei32_v_u64m1(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { + return vsoxseg2ei32_v_u64m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u64m1( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsoxseg3ei32_v_u64m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { + return vsoxseg3ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: 
@test_vsoxseg4ei32_v_u64m1( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsoxseg4ei32_v_u64m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { + return vsoxseg4ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u64m1( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsoxseg5ei32_v_u64m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { + return vsoxseg5ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u64m1( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsoxseg6ei32_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { + return vsoxseg6ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u64m1( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsoxseg7ei32_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { + return vsoxseg7ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u64m1( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsoxseg8ei32_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { + return vsoxseg8ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsoxseg2ei32_v_u64m2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { + return vsoxseg2ei32_v_u64m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u64m2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsoxseg3ei32_v_u64m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, 
vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { + return vsoxseg3ei32_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u64m2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsoxseg4ei32_v_u64m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { + return vsoxseg4ei32_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m4( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsoxseg2ei32_v_u64m4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { + return vsoxseg2ei32_v_u64m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m1( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsoxseg2ei64_v_u64m1(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { + return vsoxseg2ei64_v_u64m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u64m1( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsoxseg3ei64_v_u64m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u64m1_m (vbool64_t 
mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { + return vsoxseg3ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u64m1( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsoxseg4ei64_v_u64m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { + return vsoxseg4ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u64m1( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsoxseg5ei64_v_u64m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { + return vsoxseg5ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u64m1( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsoxseg6ei64_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { + return vsoxseg6ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u64m1( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], 
[[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsoxseg7ei64_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { + return vsoxseg7ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u64m1( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsoxseg8ei64_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { + return vsoxseg8ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsoxseg2ei64_v_u64m2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { + return vsoxseg2ei64_v_u64m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u64m2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, 
vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsoxseg3ei64_v_u64m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { + return vsoxseg3ei64_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u64m2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsoxseg4ei64_v_u64m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { + return vsoxseg4ei64_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m4( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsoxseg2ei64_v_u64m4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { + return vsoxseg2ei64_v_u64m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsoxseg2ei8_v_f32mf2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { + return vsoxseg2ei8_v_f32mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f32mf2 (float 
-  return vsoxseg3ei8_v_f32mf2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
+  return vsoxseg3ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
-  return vsoxseg4ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
+  return vsoxseg4ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
-  return vsoxseg5ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
+  return vsoxseg5ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
-  return vsoxseg6ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
+  return vsoxseg6ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
-  return vsoxseg7ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
+  return vsoxseg7ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
-  return vsoxseg8ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
+  return vsoxseg8ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
-  return vsoxseg2ei8_v_f32m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
+  return vsoxseg2ei8_v_f32m1_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
-  return vsoxseg3ei8_v_f32m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
+  return vsoxseg3ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
-  return vsoxseg4ei8_v_f32m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
+  return vsoxseg4ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
-  return vsoxseg5ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
+  return vsoxseg5ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
-  return vsoxseg6ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
+  return vsoxseg6ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
-  return vsoxseg7ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
+  return vsoxseg7ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
-  return vsoxseg8ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
+  return vsoxseg8ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_f32m2 (float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
-  return vsoxseg2ei8_v_f32m2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
+  return vsoxseg2ei8_v_f32m2_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32m2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_f32m2 (float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
-  return vsoxseg3ei8_v_f32m2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
+  return vsoxseg3ei8_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32m2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_f32m2 (float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
-  return vsoxseg4ei8_v_f32m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
+  return vsoxseg4ei8_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m4(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_f32m4 (float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
-  return vsoxseg2ei8_v_f32m4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f32m4_m (vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
+  return vsoxseg2ei8_v_f32m4_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
-  return vsoxseg2ei16_v_f32mf2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
+  return vsoxseg2ei16_v_f32mf2_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
-  return vsoxseg3ei16_v_f32mf2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
+  return vsoxseg3ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
-  return vsoxseg4ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
+  return vsoxseg4ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
-  return vsoxseg5ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
+  return vsoxseg5ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
-  return vsoxseg6ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
+  return vsoxseg6ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
-  return vsoxseg7ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
+  return vsoxseg7ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
-  return vsoxseg8ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
+  return vsoxseg8ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
-  return vsoxseg2ei16_v_f32m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
+  return vsoxseg2ei16_v_f32m1_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
-  return vsoxseg3ei16_v_f32m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
+  return vsoxseg3ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
-  return vsoxseg4ei16_v_f32m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
+  return vsoxseg4ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
-  return vsoxseg5ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
-  return vsoxseg6ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
-  return vsoxseg7ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
-  return vsoxseg8ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_f32m2 (float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
-  return vsoxseg2ei16_v_f32m2(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_f32m2 (float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
-  return vsoxseg3ei16_v_f32m2(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_f32m2 (float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
-  return vsoxseg4ei16_v_f32m2(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_f32m4 (float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
-  return vsoxseg2ei16_v_f32m4(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
-  return vsoxseg2ei32_v_f32mf2(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
-  return vsoxseg3ei32_v_f32mf2(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
-  return vsoxseg4ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
-  return vsoxseg5ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
-  return vsoxseg6ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
-  return vsoxseg7ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
-  return vsoxseg8ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
-  return vsoxseg2ei32_v_f32m1(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
-  return vsoxseg3ei32_v_f32m1(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
-  return vsoxseg4ei32_v_f32m1(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
-  return vsoxseg5ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
-  return vsoxseg6ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
-  return vsoxseg7ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
-  return vsoxseg8ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f32m2 (float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
-  return vsoxseg2ei32_v_f32m2(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_f32m2 (float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
-  return vsoxseg3ei32_v_f32m2(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_f32m2 (float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
-  return vsoxseg4ei32_v_f32m2(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f32m4 (float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
-  return vsoxseg2ei32_v_f32m4(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
-  return vsoxseg2ei64_v_f32mf2(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
-  return vsoxseg3ei64_v_f32mf2(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
-  return vsoxseg4ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
-  return vsoxseg5ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
-  return vsoxseg6ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
-  return vsoxseg7ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
-  return vsoxseg8ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
-  return vsoxseg2ei64_v_f32m1(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
-  return vsoxseg3ei64_v_f32m1(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
-  return vsoxseg4ei64_v_f32m1(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
-  return vsoxseg5ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
-  return vsoxseg6ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
-  return vsoxseg7ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
-  return vsoxseg8ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_f32m2 (float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
-  return vsoxseg2ei64_v_f32m2(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_f32m2 (float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
-  return vsoxseg3ei64_v_f32m2(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_f32m2 (float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
-  return vsoxseg4ei64_v_f32m2(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_f32m4 (float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
-  return vsoxseg2ei64_v_f32m4(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
-  return vsoxseg2ei8_v_f64m1(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
-  return vsoxseg3ei8_v_f64m1(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
-  return vsoxseg4ei8_v_f64m1(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
-  return vsoxseg5ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
-  return vsoxseg6ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
-  return vsoxseg7ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
-  return vsoxseg8ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_f64m2 (double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
-  return vsoxseg2ei8_v_f64m2(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f64m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_f64m2 (double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
-  return vsoxseg3ei8_v_f64m2(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f64m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_f64m2 (double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
-  return vsoxseg4ei8_v_f64m2(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_f64m4 (double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
-  return vsoxseg2ei8_v_f64m4(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
-  return vsoxseg2ei16_v_f64m1(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
-  return vsoxseg3ei16_v_f64m1(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
-  return vsoxseg4ei16_v_f64m1(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
-  return vsoxseg5ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
-  return vsoxseg6ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
-  return vsoxseg7ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
-  return vsoxseg8ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_f64m2 (double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
-  return vsoxseg2ei16_v_f64m2(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f64m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_f64m2 (double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
-  return vsoxseg3ei16_v_f64m2(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f64m2(
-// CHECK-RV64-NEXT: entry:
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_f64m2 (double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsoxseg4ei16_v_f64m2(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_f64m4 (double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsoxseg2ei16_v_f64m4(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsoxseg2ei32_v_f64m1(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsoxseg3ei32_v_f64m1(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsoxseg4ei32_v_f64m1(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsoxseg5ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsoxseg6ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsoxseg7ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsoxseg8ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_f64m2 (double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsoxseg2ei32_v_f64m2(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_f64m2 (double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsoxseg3ei32_v_f64m2(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_f64m2 (double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsoxseg4ei32_v_f64m2(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_f64m4 (double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsoxseg2ei32_v_f64m4(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsoxseg2ei64_v_f64m1(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsoxseg3ei64_v_f64m1(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsoxseg4ei64_v_f64m1(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsoxseg5ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsoxseg6ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsoxseg7ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsoxseg8ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_f64m2 (double *base, vuint64m2_t bindex, 
vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsoxseg2ei64_v_f64m2(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_f64m2 (double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsoxseg3ei64_v_f64m2(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_f64m2 (double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsoxseg4ei64_v_f64m2(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_f64m4 (double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsoxseg2ei64_v_f64m4(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsoxseg2ei8_v_i8mf8_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsoxseg3ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsoxseg4ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return 
vsoxseg5ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vsoxseg6ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsoxseg7ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsoxseg8ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vsoxseg2ei8_v_i8mf4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsoxseg3ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vsoxseg4ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsoxseg5ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsoxseg6ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsoxseg7ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsoxseg8ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsoxseg2ei8_v_i8mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vsoxseg3ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vsoxseg4ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vsoxseg5ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsoxseg6ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vsoxseg7ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsoxseg8ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vsoxseg2ei8_v_i8m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void 
test_vsoxseg3ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vsoxseg3ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vsoxseg4ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsoxseg5ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsoxseg6ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsoxseg7ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vsoxseg8ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return 
vsoxseg2ei8_v_i8m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return vsoxseg3ei8_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return vsoxseg4ei8_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i8m4_m (vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { - return vsoxseg2ei8_v_i8m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsoxseg2ei16_v_i8mf8_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsoxseg3ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsoxseg4ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t 
v4, size_t vl) { - return vsoxseg5ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vsoxseg6ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsoxseg7ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsoxseg8ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vsoxseg2ei16_v_i8mf4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsoxseg3ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vsoxseg4ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: 
@test_vsoxseg5ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsoxseg5ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsoxseg6ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsoxseg7ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsoxseg8ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsoxseg2ei16_v_i8mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vsoxseg3ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vsoxseg4ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vsoxseg5ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsoxseg6ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vsoxseg7ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsoxseg8ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vsoxseg2ei16_v_i8m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vsoxseg3ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vsoxseg4ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsoxseg5ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsoxseg6ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsoxseg7ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vsoxseg8ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void 
-// -void test_vsoxseg2ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return vsoxseg2ei16_v_i8m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return vsoxseg3ei16_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return vsoxseg4ei16_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_i8m4_m (vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { - return vsoxseg2ei16_v_i8m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsoxseg2ei32_v_i8mf8_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsoxseg3ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsoxseg4ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void 
test_vsoxseg5ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsoxseg5ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vsoxseg6ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsoxseg7ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsoxseg8ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vsoxseg2ei32_v_i8mf4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsoxseg3ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, 
vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vsoxseg4ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsoxseg5ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsoxseg6ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsoxseg7ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsoxseg8ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsoxseg2ei32_v_i8mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vsoxseg3ei32_v_i8mf2_m(mask, base, bindex, v0, 
v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vsoxseg4ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vsoxseg5ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsoxseg6ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vsoxseg7ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsoxseg8ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vsoxseg2ei32_v_i8m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8m1_m( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vsoxseg3ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vsoxseg4ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsoxseg5ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsoxseg6ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsoxseg7ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vsoxseg8ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return vsoxseg2ei32_v_i8m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return vsoxseg3ei32_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return vsoxseg4ei32_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsoxseg2ei64_v_i8mf8_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsoxseg3ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsoxseg4ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsoxseg5ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf8_m( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vsoxseg6ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsoxseg7ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsoxseg8ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vsoxseg2ei64_v_i8mf4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsoxseg3ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vsoxseg4ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsoxseg5ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsoxseg6ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsoxseg7ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsoxseg8ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsoxseg2ei64_v_i8mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vsoxseg3ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// 
CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vsoxseg4ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vsoxseg5ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsoxseg6ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vsoxseg7ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsoxseg8ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vsoxseg2ei64_v_i8m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_i8m1_m (vbool8_t mask, int8_t 
*base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vsoxseg3ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vsoxseg4ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsoxseg5ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsoxseg6ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsoxseg7ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vsoxseg8ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return 
vsoxseg2ei8_v_i16mf4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsoxseg3ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsoxseg4ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsoxseg5ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsoxseg6ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsoxseg7ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return 
vsoxseg8ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsoxseg2ei8_v_i16mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsoxseg3ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsoxseg4ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsoxseg5ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsoxseg6ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsoxseg7ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsoxseg8ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsoxseg2ei8_v_i16m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsoxseg3ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsoxseg4ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsoxseg5ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsoxseg6ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsoxseg7ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsoxseg8ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsoxseg2ei8_v_i16m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsoxseg3ei8_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsoxseg4ei8_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i16m4_m (vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return vsoxseg2ei8_v_i16m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsoxseg2ei16_v_i16mf4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16mf4_m( -// CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsoxseg3ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsoxseg4ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsoxseg5ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsoxseg6ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsoxseg7ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsoxseg8ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: 
@test_vsoxseg2ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsoxseg2ei16_v_i16mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsoxseg3ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsoxseg4ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsoxseg5ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsoxseg6ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsoxseg7ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsoxseg8ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsoxseg2ei16_v_i16m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsoxseg3ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsoxseg4ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsoxseg5ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsoxseg6ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void 
-// -void test_vsoxseg7ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsoxseg7ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsoxseg8ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsoxseg2ei16_v_i16m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsoxseg3ei16_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsoxseg4ei16_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_i16m4_m (vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return vsoxseg2ei16_v_i16m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsoxseg2ei32_v_i16mf4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsoxseg3ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsoxseg4ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsoxseg5ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsoxseg6ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsoxseg7ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsoxseg8ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: 
@test_vsoxseg2ei32_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return vsoxseg2ei32_v_i16mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
- return vsoxseg3ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return vsoxseg4ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
- return vsoxseg5ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
- return vsoxseg6ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
- return vsoxseg7ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
- return vsoxseg8ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return vsoxseg2ei32_v_i16m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
- return vsoxseg3ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
- return vsoxseg4ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
- return vsoxseg5ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return vsoxseg6ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
- return vsoxseg7ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
- return vsoxseg8ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return vsoxseg2ei32_v_i16m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
- return vsoxseg3ei32_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
- return vsoxseg4ei32_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i16m4_m (vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
- return vsoxseg2ei32_v_i16m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return vsoxseg2ei64_v_i16mf4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
- return vsoxseg3ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return vsoxseg4ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
- return vsoxseg5ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
- return vsoxseg6ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
- return vsoxseg7ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
- return vsoxseg8ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return vsoxseg2ei64_v_i16mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
- return vsoxseg3ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return vsoxseg4ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
- return vsoxseg5ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
- return vsoxseg6ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
- return vsoxseg7ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
- return vsoxseg8ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return vsoxseg2ei64_v_i16m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
- return vsoxseg3ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
- return vsoxseg4ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
- return vsoxseg5ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return vsoxseg6ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
- return vsoxseg7ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
- return vsoxseg8ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return vsoxseg2ei64_v_i16m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
- return vsoxseg3ei64_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
- return vsoxseg4ei64_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return vsoxseg2ei8_v_i32mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return vsoxseg3ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return vsoxseg4ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return vsoxseg5ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return vsoxseg6ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
- return vsoxseg7ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return vsoxseg8ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return vsoxseg2ei8_v_i32m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return vsoxseg3ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return vsoxseg4ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return vsoxseg5ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return vsoxseg6ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
- return vsoxseg7ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return vsoxseg8ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return vsoxseg2ei8_v_i32m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return vsoxseg3ei8_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return vsoxseg4ei8_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return vsoxseg2ei8_v_i32m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return vsoxseg2ei16_v_i32mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return vsoxseg3ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return vsoxseg4ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return vsoxseg5ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return vsoxseg6ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
- return vsoxseg7ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return vsoxseg8ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return vsoxseg2ei16_v_i32m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return vsoxseg3ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return vsoxseg4ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return vsoxseg5ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return vsoxseg6ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
- return vsoxseg7ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return vsoxseg8ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return vsoxseg2ei16_v_i32m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return vsoxseg3ei16_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return vsoxseg4ei16_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return vsoxseg2ei16_v_i32m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return vsoxseg2ei32_v_i32mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return vsoxseg3ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return vsoxseg4ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return vsoxseg5ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return vsoxseg6ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
- return vsoxseg7ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return vsoxseg8ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return vsoxseg2ei32_v_i32m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return vsoxseg3ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return vsoxseg4ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return vsoxseg5ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return vsoxseg6ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
- return vsoxseg7ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return vsoxseg8ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return vsoxseg2ei32_v_i32m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return vsoxseg3ei32_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return vsoxseg4ei32_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return vsoxseg2ei32_v_i32m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return vsoxseg2ei64_v_i32mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return vsoxseg3ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return vsoxseg4ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return vsoxseg5ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return vsoxseg6ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
- return vsoxseg7ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return vsoxseg8ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return vsoxseg2ei64_v_i32m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return vsoxseg3ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return vsoxseg4ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return vsoxseg5ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return vsoxseg6ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
- return vsoxseg7ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return vsoxseg8ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return vsoxseg2ei64_v_i32m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return vsoxseg3ei64_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return vsoxseg4ei64_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return vsoxseg2ei64_v_i32m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vsoxseg2ei8_v_i64m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return vsoxseg3ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return vsoxseg4ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return vsoxseg5ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return vsoxseg6ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return vsoxseg7ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vsoxseg8ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vsoxseg2ei8_v_i64m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vsoxseg3ei8_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vsoxseg4ei8_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vsoxseg2ei8_v_i64m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vsoxseg2ei16_v_i64m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl)
{ - return vsoxseg3ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsoxseg4ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsoxseg5ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsoxseg6ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsoxseg7ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vsoxseg8ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsoxseg2ei16_v_i64m2_m(mask, 
base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsoxseg3ei16_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsoxseg4ei16_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsoxseg2ei16_v_i64m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsoxseg2ei32_v_i64m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsoxseg3ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsoxseg4ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, 
vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsoxseg5ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsoxseg6ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsoxseg7ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vsoxseg8ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsoxseg2ei32_v_i64m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsoxseg3ei32_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsoxseg4ei32_v_i64m2_m(mask, base, bindex, v0, v1, v2, 
v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsoxseg2ei32_v_i64m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsoxseg2ei64_v_i64m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsoxseg3ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsoxseg4ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsoxseg5ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsoxseg6ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void 
test_vsoxseg7ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsoxseg7ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vsoxseg8ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsoxseg2ei64_v_i64m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsoxseg3ei64_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsoxseg4ei64_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsoxseg2ei64_v_i64m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsoxseg2ei8_v_u8mf8_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsoxseg3ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsoxseg4ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsoxseg5ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsoxseg6ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsoxseg7ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsoxseg8ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsoxseg2ei8_v_u8mf4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsoxseg3ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsoxseg4ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsoxseg5ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsoxseg6ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsoxseg7ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// 
CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsoxseg8ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsoxseg2ei8_v_u8mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vsoxseg3ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsoxseg4ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsoxseg5ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsoxseg6ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, 
vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsoxseg7ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsoxseg8ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsoxseg2ei8_v_u8m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsoxseg3ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsoxseg4ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsoxseg5ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsoxseg6ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsoxseg7ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsoxseg8ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return vsoxseg2ei8_v_u8m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return vsoxseg3ei8_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return vsoxseg4ei8_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { - return vsoxseg2ei8_v_u8m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t 
v0, vuint8mf8_t v1, size_t vl) { - return vsoxseg2ei16_v_u8mf8_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsoxseg3ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsoxseg4ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsoxseg5ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsoxseg6ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsoxseg7ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, 
size_t vl) { - return vsoxseg8ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsoxseg2ei16_v_u8mf4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsoxseg3ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsoxseg4ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsoxseg5ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsoxseg6ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsoxseg7ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsoxseg8ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsoxseg2ei16_v_u8mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vsoxseg3ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsoxseg4ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsoxseg5ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsoxseg6ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], 
i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsoxseg7ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsoxseg8ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsoxseg2ei16_v_u8m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsoxseg3ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsoxseg4ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsoxseg5ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t 
bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
-  return vsoxseg6ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], <vscale x 8 x i8> [[V4:%.*]], <vscale x 8 x i8> [[V5:%.*]], <vscale x 8 x i8> [[V6:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg7ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
-  return vsoxseg7ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], <vscale x 8 x i8> [[V4:%.*]], <vscale x 8 x i8> [[V5:%.*]], <vscale x 8 x i8> [[V6:%.*]], <vscale x 8 x i8> [[V7:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg8ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
-  return vsoxseg8ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[V0:%.*]], <vscale x 16 x i8> [[V1:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg2ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
-  return vsoxseg2ei16_v_u8m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[V0:%.*]], <vscale x 16 x i8> [[V1:%.*]], <vscale x 16 x i8> [[V2:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg3ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
-  return vsoxseg3ei16_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[V0:%.*]], <vscale x 16 x i8> [[V1:%.*]], <vscale x 16 x i8> [[V2:%.*]], <vscale x 16 x i8> [[V3:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg4ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
-  return vsoxseg4ei16_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[V0:%.*]], <vscale x 32 x i8> [[V1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg2ei16_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
-  return vsoxseg2ei16_v_u8m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg2ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
-  return vsoxseg2ei32_v_u8mf8_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg3ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
-  return vsoxseg3ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg4ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
-  return vsoxseg4ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], <vscale x 1 x i8> [[V4:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg5ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
-  return vsoxseg5ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], <vscale x 1 x i8> [[V4:%.*]], <vscale x 1 x i8> [[V5:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg6ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
-  return vsoxseg6ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], <vscale x 1 x i8> [[V4:%.*]], <vscale x 1 x i8> [[V5:%.*]], <vscale x 1 x i8> [[V6:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg7ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
-  return vsoxseg7ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], <vscale x 1 x i8> [[V4:%.*]], <vscale x 1 x i8> [[V5:%.*]], <vscale x 1 x i8> [[V6:%.*]], <vscale x 1 x i8> [[V7:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg8ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
-  return vsoxseg8ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg2ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
-  return vsoxseg2ei32_v_u8mf4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg3ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
-  return vsoxseg3ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg4ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
-  return vsoxseg4ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], <vscale x 2 x i8> [[V4:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg5ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
-  return vsoxseg5ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], <vscale x 2 x i8> [[V4:%.*]], <vscale x 2 x i8> [[V5:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg6ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
-  return vsoxseg6ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], <vscale x 2 x i8> [[V4:%.*]], <vscale x 2 x i8> [[V5:%.*]], <vscale x 2 x i8> [[V6:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg7ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
-  return vsoxseg7ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], <vscale x 2 x i8> [[V4:%.*]], <vscale x 2 x i8> [[V5:%.*]], <vscale x 2 x i8> [[V6:%.*]], <vscale x 2 x i8> [[V7:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg8ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
-  return vsoxseg8ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg2ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
-  return vsoxseg2ei32_v_u8mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg3ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
-  return vsoxseg3ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg4ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
-  return vsoxseg4ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], <vscale x 4 x i8> [[V4:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg5ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
-  return vsoxseg5ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], <vscale x 4 x i8> [[V4:%.*]], <vscale x 4 x i8> [[V5:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg6ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
-  return vsoxseg6ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], <vscale x 4 x i8> [[V4:%.*]], <vscale x 4 x i8> [[V5:%.*]], <vscale x 4 x i8> [[V6:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg7ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
-  return vsoxseg7ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], <vscale x 4 x i8> [[V4:%.*]], <vscale x 4 x i8> [[V5:%.*]], <vscale x 4 x i8> [[V6:%.*]], <vscale x 4 x i8> [[V7:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg8ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
-  return vsoxseg8ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg2ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
-  return vsoxseg2ei32_v_u8m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg3ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
-  return vsoxseg3ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg4ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
-  return vsoxseg4ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], <vscale x 8 x i8> [[V4:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsoxseg5ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
-  return vsoxseg5ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-//
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsoxseg6ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsoxseg7ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsoxseg8ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return vsoxseg2ei32_v_u8m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return vsoxseg3ei32_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return vsoxseg4ei32_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsoxseg2ei64_v_u8mf8_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsoxseg3ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsoxseg4ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsoxseg5ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsoxseg6ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsoxseg7ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, 
vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsoxseg8ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsoxseg2ei64_v_u8mf4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsoxseg3ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsoxseg4ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsoxseg5ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsoxseg6ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return 
vsoxseg7ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsoxseg8ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsoxseg2ei64_v_u8mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vsoxseg3ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsoxseg4ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsoxseg5ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsoxseg6ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsoxseg7ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsoxseg8ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsoxseg2ei64_v_u8m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsoxseg3ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsoxseg4ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsoxseg5ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsoxseg6ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsoxseg7ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsoxseg8ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return vsoxseg2ei8_v_u16mf4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vsoxseg3ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return vsoxseg4ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void 
test_vsoxseg5ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return vsoxseg5ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return vsoxseg6ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vsoxseg7ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vsoxseg8ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return vsoxseg2ei8_v_u16mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return vsoxseg3ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_u16mf2_m 
(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return vsoxseg4ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vsoxseg5ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vsoxseg6ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vsoxseg7ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsoxseg8ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsoxseg2ei8_v_u16m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_u16m1_m 
(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsoxseg3ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsoxseg4ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsoxseg5ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsoxseg6ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsoxseg7ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsoxseg8ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_u16m2_m (vbool8_t mask, 
uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsoxseg2ei8_v_u16m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsoxseg3ei8_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsoxseg4ei8_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return vsoxseg2ei8_v_u16m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return vsoxseg2ei16_v_u16mf4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vsoxseg3ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return vsoxseg4ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret 
void -// -void test_vsoxseg5ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return vsoxseg5ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return vsoxseg6ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vsoxseg7ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vsoxseg8ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return vsoxseg2ei16_v_u16mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return vsoxseg3ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void 
-// -void test_vsoxseg4ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return vsoxseg4ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vsoxseg5ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vsoxseg6ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vsoxseg7ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsoxseg8ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsoxseg2ei16_v_u16m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// 
CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsoxseg3ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsoxseg4ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsoxseg5ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsoxseg6ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsoxseg7ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsoxseg8ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsoxseg2ei16_v_u16m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsoxseg3ei16_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsoxseg4ei16_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return vsoxseg2ei16_v_u16m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsoxseg2ei32_v_u16mf4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsoxseg3ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsoxseg4ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsoxseg5ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsoxseg6ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsoxseg7ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsoxseg8ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsoxseg2ei32_v_u16mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsoxseg3ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsoxseg4ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsoxseg5ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsoxseg6ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsoxseg7ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsoxseg8ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsoxseg2ei32_v_u16m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsoxseg3ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsoxseg4ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsoxseg5ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsoxseg6ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsoxseg7ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsoxseg8ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsoxseg2ei32_v_u16m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsoxseg3ei32_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsoxseg4ei32_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return vsoxseg2ei32_v_u16m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsoxseg2ei64_v_u16mf4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsoxseg3ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsoxseg4ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsoxseg5ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsoxseg6ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsoxseg7ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsoxseg8ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsoxseg2ei64_v_u16mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsoxseg3ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsoxseg4ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsoxseg5ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsoxseg6ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsoxseg7ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsoxseg8ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsoxseg2ei64_v_u16m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsoxseg3ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsoxseg4ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsoxseg5ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsoxseg6ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsoxseg7ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsoxseg8ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsoxseg2ei64_v_u16m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsoxseg3ei64_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsoxseg4ei64_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsoxseg2ei8_v_u32mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsoxseg3ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsoxseg4ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsoxseg5ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsoxseg6ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsoxseg7ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsoxseg8ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsoxseg2ei8_v_u32m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsoxseg3ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vsoxseg4ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsoxseg5ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsoxseg6ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsoxseg7ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsoxseg8ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsoxseg2ei8_v_u32m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vsoxseg3ei8_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vsoxseg4ei8_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return vsoxseg2ei8_v_u32m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsoxseg2ei16_v_u32mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsoxseg3ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsoxseg4ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsoxseg5ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsoxseg6ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsoxseg7ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsoxseg8ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsoxseg2ei16_v_u32m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsoxseg3ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vsoxseg4ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsoxseg5ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsoxseg6ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsoxseg7ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsoxseg8ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsoxseg2ei16_v_u32m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vsoxseg3ei16_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vsoxseg4ei16_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return vsoxseg2ei16_v_u32m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsoxseg2ei32_v_u32mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsoxseg3ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsoxseg4ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsoxseg5ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsoxseg6ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsoxseg7ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsoxseg8ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsoxseg2ei32_v_u32m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsoxseg3ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vsoxseg4ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsoxseg5ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsoxseg6ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsoxseg7ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsoxseg8ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsoxseg2ei32_v_u32m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vsoxseg3ei32_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vsoxseg4ei32_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return vsoxseg2ei32_v_u32m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsoxseg2ei64_v_u32mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsoxseg3ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsoxseg4ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsoxseg5ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsoxseg6ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsoxseg7ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsoxseg8ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsoxseg2ei64_v_u32m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsoxseg3ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vsoxseg4ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsoxseg5ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsoxseg6ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsoxseg7ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsoxseg8ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsoxseg2ei64_v_u32m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vsoxseg3ei64_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vsoxseg4ei64_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return vsoxseg2ei64_v_u32m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_u64m1_m
(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsoxseg2ei8_v_u64m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsoxseg3ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsoxseg4ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsoxseg5ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsoxseg6ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsoxseg7ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t 
-  return vsoxseg8ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
-  return vsoxseg2ei8_v_u64m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
-  return vsoxseg3ei8_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
-  return vsoxseg4ei8_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
-  return vsoxseg2ei8_v_u64m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
-  return vsoxseg2ei16_v_u64m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
-  return vsoxseg3ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
-  return vsoxseg4ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
-  return vsoxseg5ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
-  return vsoxseg6ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
-  return vsoxseg7ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
-  return vsoxseg8ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
-  return vsoxseg2ei16_v_u64m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
-  return vsoxseg3ei16_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
-  return vsoxseg4ei16_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
-  return vsoxseg2ei16_v_u64m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
-  return vsoxseg2ei32_v_u64m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
-  return vsoxseg3ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
-  return vsoxseg4ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
-  return vsoxseg5ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
-  return vsoxseg6ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
-  return vsoxseg7ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
-  return vsoxseg8ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
-  return vsoxseg2ei32_v_u64m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
-  return vsoxseg3ei32_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
-  return vsoxseg4ei32_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
-  return vsoxseg2ei32_v_u64m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
-  return vsoxseg2ei64_v_u64m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
-  return vsoxseg3ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
-  return vsoxseg4ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
-  return vsoxseg5ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
-  return vsoxseg6ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
-  return vsoxseg7ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
-  return vsoxseg8ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
-  return vsoxseg2ei64_v_u64m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
-  return vsoxseg3ei64_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
-  return vsoxseg4ei64_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
-  return vsoxseg2ei64_v_u64m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
-  return vsoxseg2ei8_v_f32mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
-  return vsoxseg3ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
-  return vsoxseg4ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
-  return vsoxseg5ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
-  return vsoxseg6ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
-  return vsoxseg7ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
-  return vsoxseg8ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
-  return vsoxseg2ei8_v_f32m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
-  return vsoxseg3ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
-  return vsoxseg4ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
-  return vsoxseg5ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
-  return vsoxseg6ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
-  return vsoxseg7ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
-  return vsoxseg8ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
-  return vsoxseg2ei8_v_f32m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
-  return vsoxseg3ei8_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
-  return vsoxseg4ei8_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_f32m4_m (vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
-  return vsoxseg2ei8_v_f32m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
-  return vsoxseg2ei16_v_f32mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
-  return vsoxseg3ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
-  return vsoxseg4ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
-  return vsoxseg5ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
-  return vsoxseg6ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
-  return vsoxseg7ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
-  return vsoxseg8ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
-  return vsoxseg2ei16_v_f32m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vsoxseg3ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsoxseg4ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsoxseg5ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsoxseg6ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsoxseg7ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsoxseg8ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], float* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vsoxseg2ei16_v_f32m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return vsoxseg3ei16_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsoxseg4ei16_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_f32m4_m (vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsoxseg2ei16_v_f32m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsoxseg2ei32_v_f32mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsoxseg3ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsoxseg4ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsoxseg5ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsoxseg6ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsoxseg7ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsoxseg8ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsoxseg2ei32_v_f32m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vsoxseg3ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsoxseg4ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsoxseg5ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsoxseg6ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsoxseg7ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsoxseg8ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vsoxseg2ei32_v_f32m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32m2_m( -// CHECK-RV64-NEXT: 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vsoxseg3ei32_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vsoxseg4ei32_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f32m4_m (vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vsoxseg2ei32_v_f32m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vsoxseg2ei64_v_f32mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vsoxseg3ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vsoxseg4ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vsoxseg5ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vsoxseg6ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsoxseg7ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsoxseg8ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsoxseg2ei64_v_f32m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsoxseg3ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsoxseg4ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsoxseg5ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsoxseg6ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsoxseg7ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vsoxseg8ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vsoxseg2ei64_v_f32m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vsoxseg3ei64_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vsoxseg4ei64_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_f32m4_m (vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vsoxseg2ei64_v_f32m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return vsoxseg2ei8_v_f64m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return vsoxseg3ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return vsoxseg4ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return vsoxseg5ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return vsoxseg6ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return vsoxseg7ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return vsoxseg8ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vsoxseg2ei8_v_f64m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vsoxseg3ei8_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vsoxseg4ei8_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_f64m4_m (vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vsoxseg2ei8_v_f64m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return vsoxseg2ei16_v_f64m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return vsoxseg3ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return vsoxseg4ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return vsoxseg5ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return vsoxseg6ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return vsoxseg7ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return vsoxseg8ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vsoxseg2ei16_v_f64m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vsoxseg3ei16_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vsoxseg4ei16_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei16_v_f64m4_m (vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vsoxseg2ei16_v_f64m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return vsoxseg2ei32_v_f64m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return vsoxseg3ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return vsoxseg4ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return vsoxseg5ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return vsoxseg6ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return vsoxseg7ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return vsoxseg8ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vsoxseg2ei32_v_f64m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vsoxseg3ei32_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vsoxseg4ei32_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f64m4_m (vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vsoxseg2ei32_v_f64m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return vsoxseg2ei64_v_f64m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return vsoxseg3ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return vsoxseg4ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return vsoxseg5ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return vsoxseg6ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return vsoxseg7ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return vsoxseg8ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vsoxseg2ei64_v_f64m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vsoxseg3ei64_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vsoxseg4ei64_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei64_v_f64m4_m (vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vsoxseg2ei64_v_f64m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return vsoxseg2ei8_v_f16mf4(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return vsoxseg3ei8_v_f16mf4(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return vsoxseg4ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return vsoxseg5ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg6ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return vsoxseg6ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg7ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return vsoxseg7ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg8ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return vsoxseg8ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return vsoxseg2ei8_v_f16mf2(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg3ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return vsoxseg3ei8_v_f16mf2(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg4ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return vsoxseg4ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg5ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return vsoxseg5ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return vsoxseg6ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg5ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
+ return vsoxseg5ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return vsoxseg7ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg6ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
+ return vsoxseg6ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
- return vsoxseg8ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg7ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
+ return vsoxseg7ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return vsoxseg2ei8_v_f16m1(base, bindex, v0, v1, vl);
+void test_vsoxseg8ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
+ return vsoxseg8ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
- return vsoxseg3ei8_v_f16m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg2ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
+ return vsoxseg2ei16_v_f32m2_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return vsoxseg4ei8_v_f16m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg3ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
+ return vsoxseg3ei16_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
- return vsoxseg5ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg4ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
+ return vsoxseg4ei16_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return vsoxseg6ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg2ei16_v_f32m4_m (vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
+ return vsoxseg2ei16_v_f32m4_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
- return vsoxseg7ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg2ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
+ return vsoxseg2ei32_v_f32mf2_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
- return vsoxseg8ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg3ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
+ return vsoxseg3ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return vsoxseg2ei8_v_f16m2(base, bindex, v0, v1, vl);
+void test_vsoxseg4ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
+ return vsoxseg4ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16m2(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
- return vsoxseg3ei8_v_f16m2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg5ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
+ return vsoxseg5ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16m2(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return vsoxseg4ei8_v_f16m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg6ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
+ return vsoxseg6ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m4(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_f16m4 (_Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
- return vsoxseg2ei8_v_f16m4(base, bindex, v0, v1, vl);
+void test_vsoxseg7ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
+ return vsoxseg7ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return vsoxseg2ei16_v_f16mf4(base, bindex, v0, v1, vl);
+void test_vsoxseg8ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
+ return vsoxseg8ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return vsoxseg3ei16_v_f16mf4(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg2ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
+ return vsoxseg2ei32_v_f32m1_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return vsoxseg4ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg3ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
+ return vsoxseg3ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return vsoxseg5ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg4ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
+ return vsoxseg4ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return vsoxseg6ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg5ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
+ return vsoxseg5ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return vsoxseg7ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg6ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
+ return vsoxseg6ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return vsoxseg8ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg7ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
+ return vsoxseg7ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return vsoxseg2ei16_v_f16mf2(base, bindex, v0, v1, vl);
+void test_vsoxseg8ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
+ return vsoxseg8ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return vsoxseg3ei16_v_f16mf2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg2ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
+ return vsoxseg2ei32_v_f32m2_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return vsoxseg4ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg3ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
+ return vsoxseg3ei32_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2,
vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return vsoxseg5ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg4ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { + return vsoxseg4ei32_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return vsoxseg6ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg2ei32_v_f32m4_m (vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { + return vsoxseg2ei32_v_f32m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return vsoxseg7ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg2ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { + return vsoxseg2ei64_v_f32mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return vsoxseg8ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg3ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { + return vsoxseg3ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m1( +// CHECK-RV64-LABEL: 
@test_vsoxseg4ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return vsoxseg2ei16_v_f16m1(base, bindex, v0, v1, vl); +void test_vsoxseg4ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { + return vsoxseg4ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16m1( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return vsoxseg3ei16_v_f16m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg5ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { + return vsoxseg5ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16m1( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return vsoxseg4ei16_v_f16m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg6ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { + return vsoxseg6ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16m1( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, 
vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return vsoxseg5ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg7ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { + return vsoxseg7ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16m1( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return vsoxseg6ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg8ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { + return vsoxseg8ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16m1( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return vsoxseg7ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg2ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { + return vsoxseg2ei64_v_f32m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16m1( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return vsoxseg8ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void 
test_vsoxseg3ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { + return vsoxseg3ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return vsoxseg2ei16_v_f16m2(base, bindex, v0, v1, vl); +void test_vsoxseg4ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { + return vsoxseg4ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16m2( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return vsoxseg3ei16_v_f16m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg5ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { + return vsoxseg5ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16m2( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return vsoxseg4ei16_v_f16m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg6ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { + return vsoxseg6ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m4( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f16m4 (_Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return vsoxseg2ei16_v_f16m4(base, bindex, v0, v1, vl); +void test_vsoxseg7ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { + return vsoxseg7ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16mf4( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return vsoxseg2ei32_v_f16mf4(base, bindex, v0, v1, vl); +void test_vsoxseg8ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { + return vsoxseg8ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16mf4( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return vsoxseg3ei32_v_f16mf4(base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { + return vsoxseg2ei64_v_f32m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16mf4( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return vsoxseg4ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { + return vsoxseg3ei64_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: 
@test_vsoxseg5ei32_v_f16mf4( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return vsoxseg5ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg4ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { + return vsoxseg4ei64_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16mf4( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return vsoxseg6ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg2ei64_v_f32m4_m (vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { + return vsoxseg2ei64_v_f32m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16mf4( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return vsoxseg7ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg2ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { + return vsoxseg2ei8_v_f64m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16mf4( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return vsoxseg8ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg3ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { + return vsoxseg3ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return vsoxseg2ei32_v_f16mf2(base, bindex, v0, v1, vl); +void test_vsoxseg4ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { + return vsoxseg4ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return vsoxseg3ei32_v_f16mf2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg5ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { + return vsoxseg5ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return vsoxseg4ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg6ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { + return vsoxseg6ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: 
@test_vsoxseg5ei32_v_f16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return vsoxseg5ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg7ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { + return vsoxseg7ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return vsoxseg6ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg8ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { + return vsoxseg8ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return vsoxseg7ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg2ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { + return vsoxseg2ei8_v_f64m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return vsoxseg8ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg3ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { + return vsoxseg3ei8_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m1( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return vsoxseg2ei32_v_f16m1(base, bindex, v0, v1, vl); +void test_vsoxseg4ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { + return vsoxseg4ei8_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16m1( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return vsoxseg3ei32_v_f16m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei8_v_f64m4_m (vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { + return vsoxseg2ei8_v_f64m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16m1( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return vsoxseg4ei32_v_f16m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg2ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { + return vsoxseg2ei16_v_f64m1_m(mask, base, bindex, v0, 
v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16m1( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return vsoxseg5ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg3ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { + return vsoxseg3ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16m1( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return vsoxseg6ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg4ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { + return vsoxseg4ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16m1( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return vsoxseg7ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg5ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { + return vsoxseg5ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16m1( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return vsoxseg8ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg6ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { + return vsoxseg6ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m2( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return vsoxseg2ei32_v_f16m2(base, bindex, v0, v1, vl); +void test_vsoxseg7ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { + return vsoxseg7ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16m2( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return vsoxseg3ei32_v_f16m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg8ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { + return vsoxseg8ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16m2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t 
vl) { - return vsoxseg4ei32_v_f16m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg2ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { + return vsoxseg2ei16_v_f64m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m4( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f16m4 (_Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return vsoxseg2ei32_v_f16m4(base, bindex, v0, v1, vl); +void test_vsoxseg3ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { + return vsoxseg3ei16_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16mf4( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return vsoxseg2ei64_v_f16mf4(base, bindex, v0, v1, vl); +void test_vsoxseg4ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { + return vsoxseg4ei16_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16mf4( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return vsoxseg3ei64_v_f16mf4(base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei16_v_f64m4_m (vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { + return vsoxseg2ei16_v_f64m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16mf4( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, 
vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return vsoxseg4ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg2ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { + return vsoxseg2ei32_v_f64m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16mf4( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return vsoxseg5ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg3ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { + return vsoxseg3ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16mf4( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return vsoxseg6ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg4ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { + return vsoxseg4ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16mf4( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return vsoxseg7ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg5ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { + return vsoxseg5ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: 
@test_vsoxseg8ei64_v_f16mf4( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return vsoxseg8ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg6ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { + return vsoxseg6ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return vsoxseg2ei64_v_f16mf2(base, bindex, v0, v1, vl); +void test_vsoxseg7ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { + return vsoxseg7ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return vsoxseg3ei64_v_f16mf2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg8ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { + return vsoxseg8ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return vsoxseg4ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg2ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { + return vsoxseg2ei32_v_f64m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return vsoxseg5ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg3ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { + return vsoxseg3ei32_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return vsoxseg6ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg4ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { + return vsoxseg4ei32_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return vsoxseg7ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void 
+void test_vsoxseg2ei32_v_f64m4_m (vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
+  return vsoxseg2ei32_v_f64m4_m(mask, base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
-  return vsoxseg8ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg2ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
+  return vsoxseg2ei64_v_f64m1_m(mask, base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
-  return vsoxseg2ei64_v_f16m1(base, bindex, v0, v1, vl);
+void test_vsoxseg3ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
+  return vsoxseg3ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
-  return vsoxseg3ei64_v_f16m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg4ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
+  return vsoxseg4ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
-  return vsoxseg4ei64_v_f16m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg5ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
+  return vsoxseg5ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
-  return vsoxseg5ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg6ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
+  return vsoxseg6ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
-  return vsoxseg6ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg7ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
+  return vsoxseg7ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4,
-void test_vsoxseg7ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
-  return vsoxseg7ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg8ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
+  return vsoxseg8ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
-  return vsoxseg8ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg2ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
+  return vsoxseg2ei64_v_f64m2_m(mask, base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16m2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
-  return vsoxseg2ei64_v_f16m2(base, bindex, v0, v1, vl);
+void test_vsoxseg3ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
+  return vsoxseg3ei64_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16m2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
-  return vsoxseg3ei64_v_f16m2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg4ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
+  return vsoxseg4ei64_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16m2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
-  return vsoxseg4ei64_v_f16m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg2ei64_v_f64m4_m (vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
+  return vsoxseg2ei64_v_f64m4_m(mask, base, bindex, v0, v1, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16mf4_m(
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxseg.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxseg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxseg.c
@@ -7135,8912 +7135,893 @@
   return vsuxseg2ei64_v_f64m4(base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
-  return vsuxseg2ei8_v_i8mf8_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
-  return vsuxseg3ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
-  return vsuxseg4ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
-  return vsuxseg5ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
-  return vsuxseg6ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
-  return vsuxseg7ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
-  return vsuxseg8ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
-  return vsuxseg2ei8_v_i8mf4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
-  return vsuxseg3ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
-  return vsuxseg4ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
-  return vsuxseg5ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
-  return vsuxseg6ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
-  return vsuxseg7ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
-  return vsuxseg8ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
-  return vsuxseg2ei8_v_i8mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
-  return vsuxseg3ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
-  return vsuxseg4ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
-  return vsuxseg5ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
-  return vsuxseg6ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
-  return vsuxseg7ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
-  return vsuxseg8ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
-  return vsuxseg2ei8_v_i8m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
-  return vsuxseg3ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
-  return vsuxseg4ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
-  return vsuxseg5ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
-  return vsuxseg6ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
-  return vsuxseg7ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
-  return vsuxseg8ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
-  return vsuxseg2ei8_v_i8m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
-  return vsuxseg3ei8_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
-  return vsuxseg4ei8_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i8m4_m (vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) {
-  return vsuxseg2ei8_v_i8m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
-  return vsuxseg2ei16_v_i8mf8_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
-  return vsuxseg3ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
-  return vsuxseg4ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
-  return vsuxseg5ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
-  return vsuxseg6ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
-  return vsuxseg7ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
-  return vsuxseg8ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
-  return vsuxseg2ei16_v_i8mf4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
-  return vsuxseg3ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
-  return vsuxseg4ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
-  return vsuxseg5ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
-  return vsuxseg6ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
-  return vsuxseg7ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
-  return vsuxseg8ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
-  return vsuxseg2ei16_v_i8mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
-  return vsuxseg3ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
-  return vsuxseg4ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
-  return vsuxseg5ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
-  return vsuxseg6ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
-  return vsuxseg7ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
-  return vsuxseg8ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
-  return vsuxseg2ei16_v_i8m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
-  return vsuxseg3ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
-  return vsuxseg4ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
-  return vsuxseg5ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
-  return vsuxseg6ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
-  return vsuxseg7ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
-  return vsuxseg8ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
-  return vsuxseg2ei16_v_i8m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
-  return vsuxseg3ei16_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
-  return vsuxseg4ei16_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i8m4_m (vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) {
-  return vsuxseg2ei16_v_i8m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
-  return vsuxseg2ei32_v_i8mf8_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
-  return vsuxseg3ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
-  return vsuxseg4ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
-  return vsuxseg5ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
-  return vsuxseg6ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
-  return vsuxseg7ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
-  return vsuxseg8ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
-  return vsuxseg2ei32_v_i8mf4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
-  return vsuxseg3ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
-  return vsuxseg4ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
-  return vsuxseg5ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
-  return vsuxseg6ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
-  return vsuxseg7ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
-  return vsuxseg8ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
-  return vsuxseg2ei32_v_i8mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
-  return vsuxseg3ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
-  return vsuxseg4ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
-  return vsuxseg5ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
-  return vsuxseg6ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
-  return vsuxseg7ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
-  return vsuxseg8ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
-  return vsuxseg2ei32_v_i8m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
-  return vsuxseg3ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
-  return vsuxseg4ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
-  return vsuxseg5ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
-  return vsuxseg6ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
-  return vsuxseg7ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
-  return vsuxseg8ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
-  return vsuxseg2ei32_v_i8m2_m(mask, base, bindex, v0, v1, vl);
-}
-
@llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return vsuxseg3ei32_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return vsuxseg4ei32_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsuxseg2ei64_v_i8mf8_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsuxseg3ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsuxseg4ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsuxseg5ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return 
vsuxseg6ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsuxseg7ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsuxseg8ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vsuxseg2ei64_v_i8mf4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsuxseg3ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vsuxseg4ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsuxseg5ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsuxseg6ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsuxseg7ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsuxseg8ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsuxseg2ei64_v_i8mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vsuxseg3ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vsuxseg4ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], 
i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vsuxseg5ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsuxseg6ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vsuxseg7ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsuxseg8ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vsuxseg2ei64_v_i8m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vsuxseg3ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void 
test_vsuxseg4ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vsuxseg4ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsuxseg5ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsuxseg6ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsuxseg7ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vsuxseg8ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsuxseg2ei8_v_i16mf4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, 
size_t vl) { - return vsuxseg3ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsuxseg4ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsuxseg5ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsuxseg6ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsuxseg7ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsuxseg8ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return 
vsuxseg2ei8_v_i16mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsuxseg3ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsuxseg4ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsuxseg5ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsuxseg6ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsuxseg7ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return 
vsuxseg8ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsuxseg2ei8_v_i16m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsuxseg3ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsuxseg4ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsuxseg5ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsuxseg6ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsuxseg7ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsuxseg8ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsuxseg2ei8_v_i16m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsuxseg3ei8_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsuxseg4ei8_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_i16m4_m (vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return vsuxseg2ei8_v_i16m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsuxseg2ei16_v_i16mf4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsuxseg3ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); -} - -// 
CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsuxseg4ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsuxseg5ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsuxseg6ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsuxseg7ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsuxseg8ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsuxseg2ei16_v_i16mf2_m(mask, base, bindex, v0, v1, vl); -} - 
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsuxseg3ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsuxseg4ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsuxseg5ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsuxseg6ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsuxseg7ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsuxseg8ei16_v_i16mf2_m(mask, base, bindex, v0, v1, 
v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsuxseg2ei16_v_i16m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsuxseg3ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsuxseg4ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsuxseg5ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsuxseg6ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsuxseg7ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsuxseg8ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsuxseg2ei16_v_i16m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsuxseg3ei16_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsuxseg4ei16_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_i16m4_m (vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return vsuxseg2ei16_v_i16m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsuxseg2ei32_v_i16mf4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsuxseg3ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: 
@test_vsuxseg4ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsuxseg4ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsuxseg5ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsuxseg6ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsuxseg7ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsuxseg8ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsuxseg2ei32_v_i16mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: 
@test_vsuxseg3ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsuxseg3ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsuxseg4ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsuxseg5ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsuxseg6ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsuxseg7ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsuxseg8ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); 
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
-  return vsuxseg2ei32_v_i16m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
-  return vsuxseg3ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
-  return vsuxseg4ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
-  return vsuxseg5ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
-  return vsuxseg6ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
-  return vsuxseg7ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
-  return vsuxseg8ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
-  return vsuxseg2ei32_v_i16m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
-  return vsuxseg3ei32_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
-  return vsuxseg4ei32_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i16m4_m (vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
-  return vsuxseg2ei32_v_i16m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
-  return vsuxseg2ei64_v_i16mf4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
-  return vsuxseg3ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
-  return vsuxseg4ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
-  return vsuxseg5ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
-  return vsuxseg6ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
-  return vsuxseg7ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
-  return vsuxseg8ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
-  return vsuxseg2ei64_v_i16mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
-  return vsuxseg3ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
-  return vsuxseg4ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
-  return vsuxseg5ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
-  return vsuxseg6ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
-  return vsuxseg7ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
-  return vsuxseg8ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
-  return vsuxseg2ei64_v_i16m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
-  return vsuxseg3ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
-  return vsuxseg4ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
-  return vsuxseg5ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
-  return vsuxseg6ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
-  return vsuxseg7ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
-  return vsuxseg8ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
-  return vsuxseg2ei64_v_i16m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
-  return vsuxseg3ei64_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
-  return vsuxseg4ei64_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
-  return vsuxseg2ei8_v_i32mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
-  return vsuxseg3ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
-  return vsuxseg4ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
-  return vsuxseg5ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
-  return vsuxseg6ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
-  return vsuxseg7ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
-  return vsuxseg8ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
-  return vsuxseg2ei8_v_i32m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
-  return vsuxseg3ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
-  return vsuxseg4ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
-  return vsuxseg5ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
-  return vsuxseg6ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
-  return vsuxseg7ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
-  return vsuxseg8ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
-  return vsuxseg2ei8_v_i32m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
-  return vsuxseg3ei8_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
-  return vsuxseg4ei8_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
-  return vsuxseg2ei8_v_i32m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
-  return vsuxseg2ei16_v_i32mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
-  return vsuxseg3ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
-  return vsuxseg4ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
-  return vsuxseg5ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
-  return vsuxseg6ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
-  return vsuxseg7ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
-  return vsuxseg8ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
-  return vsuxseg2ei16_v_i32m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
-  return vsuxseg3ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
-  return vsuxseg4ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
-  return vsuxseg5ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
-  return vsuxseg6ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
-  return vsuxseg7ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
-  return vsuxseg8ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
-  return vsuxseg2ei16_v_i32m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
-  return vsuxseg3ei16_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
-  return vsuxseg4ei16_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
-  return vsuxseg2ei16_v_i32m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
-  return vsuxseg2ei32_v_i32mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
-  return vsuxseg3ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
-  return vsuxseg4ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
-  return vsuxseg5ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
-  return vsuxseg6ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
-  return vsuxseg7ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
-  return vsuxseg8ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
-  return vsuxseg2ei32_v_i32m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
-  return vsuxseg3ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
-  return vsuxseg4ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
-  return vsuxseg5ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
-  return vsuxseg6ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
-  return vsuxseg7ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
-  return vsuxseg8ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
-  return vsuxseg2ei32_v_i32m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
-  return vsuxseg3ei32_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
-  return vsuxseg4ei32_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
-  return vsuxseg2ei32_v_i32m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
-  return vsuxseg2ei64_v_i32mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
-  return vsuxseg3ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
-  return vsuxseg4ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
-  return vsuxseg5ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
-  return vsuxseg6ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
-  return vsuxseg7ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
-  return vsuxseg8ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
-  return vsuxseg2ei64_v_i32m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
-  return vsuxseg3ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
-  return vsuxseg4ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
-  return vsuxseg5ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
-  return vsuxseg6ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
-  return vsuxseg7ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
-  return vsuxseg8ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
-  return vsuxseg2ei64_v_i32m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
-  return vsuxseg3ei64_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
-  return vsuxseg4ei64_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
-  return vsuxseg2ei64_v_i32m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
-  return vsuxseg2ei8_v_i64m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
-  return vsuxseg3ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
-  return vsuxseg4ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
-  return vsuxseg5ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
-  return vsuxseg6ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
-  return vsuxseg7ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
-  return vsuxseg8ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
-  return vsuxseg2ei8_v_i64m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
-  return vsuxseg3ei8_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
-  return vsuxseg4ei8_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
-  return vsuxseg2ei8_v_i64m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
-  return vsuxseg2ei16_v_i64m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
-  return vsuxseg3ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t
- return vsuxseg4ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return vsuxseg5ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return vsuxseg6ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return vsuxseg7ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vsuxseg8ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vsuxseg2ei16_v_i64m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vsuxseg3ei16_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vsuxseg4ei16_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vsuxseg2ei16_v_i64m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vsuxseg2ei32_v_i64m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return vsuxseg3ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return vsuxseg4ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return vsuxseg5ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return vsuxseg6ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return vsuxseg7ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vsuxseg8ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vsuxseg2ei32_v_i64m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vsuxseg3ei32_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vsuxseg4ei32_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vsuxseg2ei32_v_i64m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vsuxseg2ei64_v_i64m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return vsuxseg3ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return vsuxseg4ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return vsuxseg5ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return vsuxseg6ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return vsuxseg7ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vsuxseg8ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vsuxseg2ei64_v_i64m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vsuxseg3ei64_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vsuxseg4ei64_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vsuxseg2ei64_v_i64m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return vsuxseg2ei8_v_u8mf8_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
- return vsuxseg3ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return vsuxseg4ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
- return vsuxseg5ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return vsuxseg6ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
- return vsuxseg7ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return vsuxseg8ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return vsuxseg2ei8_v_u8mf4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return vsuxseg3ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return vsuxseg4ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
- return vsuxseg5ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return vsuxseg6ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
- return vsuxseg7ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return vsuxseg8ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return vsuxseg2ei8_v_u8mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return vsuxseg3ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
- return vsuxseg4ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
- return vsuxseg5ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return vsuxseg6ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
- return vsuxseg7ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
- return vsuxseg8ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return vsuxseg2ei8_v_u8m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
- return vsuxseg3ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
- return vsuxseg4ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
- return vsuxseg5ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
- return vsuxseg6ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
- return vsuxseg7ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
- return vsuxseg8ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
- return vsuxseg2ei8_v_u8m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
- return vsuxseg3ei8_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
- return vsuxseg4ei8_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
- return vsuxseg2ei8_v_u8m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return vsuxseg2ei16_v_u8mf8_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
- return vsuxseg3ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return vsuxseg4ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
- return vsuxseg5ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return vsuxseg6ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
- return vsuxseg7ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return vsuxseg8ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return vsuxseg2ei16_v_u8mf4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return vsuxseg3ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return vsuxseg4ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
- return vsuxseg5ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return vsuxseg6ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
- return vsuxseg7ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return vsuxseg8ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return vsuxseg2ei16_v_u8mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return vsuxseg3ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
- return vsuxseg4ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
- return vsuxseg5ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return vsuxseg6ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
- return vsuxseg7ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
- return vsuxseg8ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return vsuxseg2ei16_v_u8m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
- return vsuxseg3ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
- return vsuxseg4ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
- return vsuxseg5ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
- return vsuxseg6ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
- return vsuxseg7ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
- return vsuxseg8ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
- return vsuxseg2ei16_v_u8m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
- return vsuxseg3ei16_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
- return vsuxseg4ei16_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
- return vsuxseg2ei16_v_u8m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return vsuxseg2ei32_v_u8mf8_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
- return vsuxseg3ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return vsuxseg4ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
- return vsuxseg5ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return vsuxseg6ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
- return vsuxseg7ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return vsuxseg8ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return vsuxseg2ei32_v_u8mf4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return vsuxseg3ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return vsuxseg4ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
- return vsuxseg5ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return vsuxseg6ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
- return vsuxseg7ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return vsuxseg8ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return vsuxseg2ei32_v_u8mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return vsuxseg3ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
- return vsuxseg4ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
- return vsuxseg5ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return vsuxseg6ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void
test_vsuxseg7ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsuxseg7ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsuxseg8ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsuxseg2ei32_v_u8m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsuxseg3ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsuxseg4ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsuxseg5ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, 
size_t vl) { - return vsuxseg6ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsuxseg7ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsuxseg8ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return vsuxseg2ei32_v_u8m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return vsuxseg3ei32_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return vsuxseg4ei32_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsuxseg2ei64_v_u8mf8_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i64.i64( 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsuxseg3ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsuxseg4ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsuxseg5ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsuxseg6ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsuxseg7ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsuxseg8ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i64.i64( 
[[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsuxseg2ei64_v_u8mf4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsuxseg3ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsuxseg4ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsuxseg5ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsuxseg6ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsuxseg7ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// 
-void test_vsuxseg8ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsuxseg8ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsuxseg2ei64_v_u8mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vsuxseg3ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsuxseg4ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsuxseg5ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsuxseg6ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, 
vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsuxseg7ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsuxseg8ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsuxseg2ei64_v_u8m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsuxseg3ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsuxseg4ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsuxseg5ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsuxseg6ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8m1_m( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsuxseg7ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsuxseg8ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return vsuxseg2ei8_v_u16mf4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vsuxseg3ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return vsuxseg4ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return vsuxseg5ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return vsuxseg6ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vsuxseg7ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vsuxseg8ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return vsuxseg2ei8_v_u16mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return vsuxseg3ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return vsuxseg4ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vsuxseg5ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vsuxseg6ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vsuxseg7ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsuxseg8ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsuxseg2ei8_v_u16m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsuxseg3ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
-// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsuxseg4ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsuxseg5ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsuxseg6ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsuxseg7ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsuxseg8ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsuxseg2ei8_v_u16m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void 
test_vsuxseg3ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsuxseg3ei8_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsuxseg4ei8_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return vsuxseg2ei8_v_u16m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return vsuxseg2ei16_v_u16mf4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vsuxseg3ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return vsuxseg4ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return vsuxseg5ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i16.i64( 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return vsuxseg6ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vsuxseg7ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vsuxseg8ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return vsuxseg2ei16_v_u16mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return vsuxseg3ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return vsuxseg4ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vsuxseg5ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vsuxseg6ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vsuxseg7ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsuxseg8ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsuxseg2ei16_v_u16m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsuxseg3ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsuxseg4ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsuxseg5ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsuxseg6ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsuxseg7ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsuxseg8ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsuxseg2ei16_v_u16m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsuxseg3ei16_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsuxseg4ei16_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return vsuxseg2ei16_v_u16m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return vsuxseg2ei32_v_u16mf4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vsuxseg3ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return vsuxseg4ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return vsuxseg5ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16mf4_m( 
-// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return vsuxseg6ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vsuxseg7ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vsuxseg8ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return vsuxseg2ei32_v_u16mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return vsuxseg3ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return vsuxseg4ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vsuxseg5ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vsuxseg6ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vsuxseg7ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsuxseg8ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsuxseg2ei32_v_u16m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsuxseg3ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsuxseg4ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsuxseg5ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsuxseg6ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsuxseg7ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsuxseg8ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsuxseg2ei32_v_u16m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsuxseg3ei32_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsuxseg4ei32_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return vsuxseg2ei32_v_u16m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return vsuxseg2ei64_v_u16mf4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vsuxseg3ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return vsuxseg4ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return vsuxseg5ei64_v_u16mf4_m(mask, base, 
bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return vsuxseg6ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vsuxseg7ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vsuxseg8ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return vsuxseg2ei64_v_u16mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return vsuxseg3ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return vsuxseg4ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); 
-} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vsuxseg5ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vsuxseg6ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vsuxseg7ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsuxseg8ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsuxseg2ei64_v_u16m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsuxseg3ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// 
CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsuxseg4ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsuxseg5ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsuxseg6ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsuxseg7ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsuxseg8ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsuxseg2ei64_v_u16m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: 
@test_vsuxseg3ei64_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsuxseg3ei64_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsuxseg4ei64_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsuxseg2ei8_v_u32mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsuxseg3ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsuxseg4ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsuxseg5ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_u32mf2_m (vbool64_t mask, 
uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsuxseg6ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsuxseg7ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsuxseg8ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsuxseg2ei8_v_u32m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsuxseg3ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsuxseg4ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, 
vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsuxseg5ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsuxseg6ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsuxseg7ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsuxseg8ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsuxseg2ei8_v_u32m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsuxseg3ei8_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsuxseg4ei8_v_u32m2_m(mask, base, bindex, v0, 
v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsuxseg2ei8_v_u32m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsuxseg2ei16_v_u32mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsuxseg3ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsuxseg4ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsuxseg5ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsuxseg6ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsuxseg7ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsuxseg8ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsuxseg2ei16_v_u32m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsuxseg3ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsuxseg4ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsuxseg5ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_u32m1_m 
(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsuxseg6ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsuxseg7ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsuxseg8ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsuxseg2ei16_v_u32m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsuxseg3ei16_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsuxseg4ei16_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return 
vsuxseg2ei16_v_u32m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsuxseg2ei32_v_u32mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsuxseg3ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsuxseg4ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsuxseg5ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsuxseg6ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsuxseg7ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u32mf2_m( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsuxseg8ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsuxseg2ei32_v_u32m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsuxseg3ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsuxseg4ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsuxseg5ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsuxseg6ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsuxseg7ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsuxseg8ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsuxseg2ei32_v_u32m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsuxseg3ei32_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsuxseg4ei32_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsuxseg2ei32_v_u32m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, 
vuint32mf2_t v1, size_t vl) { - return vsuxseg2ei64_v_u32mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsuxseg3ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsuxseg4ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsuxseg5ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsuxseg6ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsuxseg7ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, 
- return vsuxseg8ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsuxseg2ei64_v_u32m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsuxseg3ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vsuxseg4ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsuxseg5ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsuxseg6ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsuxseg7ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsuxseg8ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsuxseg2ei64_v_u32m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vsuxseg3ei64_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vsuxseg4ei64_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return vsuxseg2ei64_v_u32m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return vsuxseg2ei8_v_u64m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
- return vsuxseg3ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return vsuxseg4ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return vsuxseg5ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return vsuxseg6ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return vsuxseg7ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return vsuxseg8ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return vsuxseg2ei8_v_u64m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
- return vsuxseg3ei8_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return vsuxseg4ei8_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return vsuxseg2ei8_v_u64m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return vsuxseg2ei16_v_u64m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
- return vsuxseg3ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return vsuxseg4ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return vsuxseg5ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return vsuxseg6ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return vsuxseg7ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return vsuxseg8ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return vsuxseg2ei16_v_u64m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
- return vsuxseg3ei16_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return vsuxseg4ei16_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return vsuxseg2ei16_v_u64m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return vsuxseg2ei32_v_u64m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
- return vsuxseg3ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return vsuxseg4ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return vsuxseg5ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return vsuxseg6ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return vsuxseg7ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return vsuxseg8ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return vsuxseg2ei32_v_u64m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
- return vsuxseg3ei32_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return vsuxseg4ei32_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return vsuxseg2ei32_v_u64m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return vsuxseg2ei64_v_u64m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
- return vsuxseg3ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return vsuxseg4ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return vsuxseg5ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return vsuxseg6ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return vsuxseg7ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return vsuxseg8ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return vsuxseg2ei64_v_u64m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
- return vsuxseg3ei64_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return vsuxseg4ei64_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return vsuxseg2ei64_v_u64m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vsuxseg2ei8_v_f32mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vsuxseg3ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vsuxseg4ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vsuxseg5ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vsuxseg6ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsuxseg7ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsuxseg8ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsuxseg2ei8_v_f32m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsuxseg3ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsuxseg4ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsuxseg5ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsuxseg6ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsuxseg7ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vsuxseg8ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vsuxseg2ei8_v_f32m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vsuxseg3ei8_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vsuxseg4ei8_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_f32m4_m (vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vsuxseg2ei8_v_f32m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vsuxseg2ei16_v_f32mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vsuxseg3ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vsuxseg4ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vsuxseg5ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vsuxseg6ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsuxseg7ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsuxseg8ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsuxseg2ei16_v_f32m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsuxseg3ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsuxseg4ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsuxseg5ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsuxseg6ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsuxseg7ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vsuxseg8ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vsuxseg2ei16_v_f32m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vsuxseg3ei16_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vsuxseg4ei16_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_f32m4_m (vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vsuxseg2ei16_v_f32m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vsuxseg2ei32_v_f32mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vsuxseg3ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vsuxseg4ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vsuxseg5ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vsuxseg6ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsuxseg7ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsuxseg8ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsuxseg2ei32_v_f32m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsuxseg3ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsuxseg4ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsuxseg5ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsuxseg6ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsuxseg7ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vsuxseg8ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vsuxseg2ei32_v_f32m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vsuxseg3ei32_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
@llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsuxseg4ei32_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f32m4_m (vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsuxseg2ei32_v_f32m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsuxseg2ei64_v_f32mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsuxseg3ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsuxseg4ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsuxseg5ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, 
vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsuxseg6ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsuxseg7ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsuxseg8ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsuxseg2ei64_v_f32m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vsuxseg3ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsuxseg4ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t 
vl) { - return vsuxseg5ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsuxseg6ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsuxseg7ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsuxseg8ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vsuxseg2ei64_v_f32m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return vsuxseg3ei64_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsuxseg4ei64_v_f32m2_m(mask, base, 
bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_f32m4_m (vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsuxseg2ei64_v_f32m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsuxseg2ei8_v_f64m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsuxseg3ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsuxseg4ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsuxseg5ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsuxseg6ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsuxseg7ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsuxseg8ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsuxseg2ei8_v_f64m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsuxseg3ei8_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsuxseg4ei8_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_f64m4_m (vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsuxseg2ei8_v_f64m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsuxseg2ei16_v_f64m1_m(mask, base, bindex, v0, v1, vl); -} - -// 
CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsuxseg3ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsuxseg4ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsuxseg5ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsuxseg6ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsuxseg7ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsuxseg8ei16_v_f64m1_m(mask, 
base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsuxseg2ei16_v_f64m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsuxseg3ei16_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsuxseg4ei16_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_f64m4_m (vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsuxseg2ei16_v_f64m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsuxseg2ei32_v_f64m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsuxseg3ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, 
vfloat64m1_t v3, size_t vl) { - return vsuxseg4ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsuxseg5ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsuxseg6ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsuxseg7ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsuxseg8ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsuxseg2ei32_v_f64m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, 
vfloat64m2_t v2, size_t vl) { - return vsuxseg3ei32_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsuxseg4ei32_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f64m4_m (vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsuxseg2ei32_v_f64m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsuxseg2ei64_v_f64m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsuxseg3ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsuxseg4ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsuxseg5ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsuxseg6ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsuxseg7ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsuxseg8ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsuxseg2ei64_v_f64m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsuxseg3ei64_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsuxseg4ei64_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void 
test_vsuxseg2ei64_v_f64m4_m (vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsuxseg2ei64_v_f64m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return vsuxseg2ei8_v_f16mf4(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return vsuxseg3ei8_v_f16mf4(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return vsuxseg4ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return vsuxseg5ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return vsuxseg6ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return vsuxseg7ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], 
[[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return vsuxseg8ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return vsuxseg2ei8_v_f16mf2(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return vsuxseg3ei8_v_f16mf2(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return vsuxseg4ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return vsuxseg5ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return vsuxseg6ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return vsuxseg7ei8_v_f16mf2(base, 
bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return vsuxseg8ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return vsuxseg2ei8_v_f16m1(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return vsuxseg3ei8_v_f16m1(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return vsuxseg4ei8_v_f16m1(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return vsuxseg5ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return vsuxseg6ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_f16m1 (_Float16 
*base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return vsuxseg7ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return vsuxseg8ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return vsuxseg2ei8_v_f16m2(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return vsuxseg3ei8_v_f16m2(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return vsuxseg4ei8_v_f16m2(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_f16m4 (_Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return vsuxseg2ei8_v_f16m4(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return vsuxseg2ei16_v_f16mf4(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, 
size_t vl) { - return vsuxseg3ei16_v_f16mf4(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return vsuxseg4ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return vsuxseg5ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return vsuxseg6ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return vsuxseg7ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return vsuxseg8ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return vsuxseg2ei16_v_f16mf2(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return vsuxseg3ei16_v_f16mf2(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return vsuxseg4ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return vsuxseg5ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return vsuxseg6ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return vsuxseg7ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
- return vsuxseg8ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return vsuxseg2ei16_v_f16m1(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
- return vsuxseg3ei16_v_f16m1(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return vsuxseg4ei16_v_f16m1(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
- return vsuxseg5ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return vsuxseg6ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
- return vsuxseg7ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
- return vsuxseg8ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return vsuxseg2ei16_v_f16m2(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
- return vsuxseg3ei16_v_f16m2(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return vsuxseg4ei16_v_f16m2(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_f16m4 (_Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
- return vsuxseg2ei16_v_f16m4(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return vsuxseg2ei32_v_f16mf4(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return vsuxseg3ei32_v_f16mf4(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return vsuxseg4ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return vsuxseg5ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return vsuxseg6ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return vsuxseg7ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return vsuxseg8ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return vsuxseg2ei32_v_f16mf2(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return vsuxseg3ei32_v_f16mf2(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return vsuxseg4ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return vsuxseg5ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return vsuxseg6ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return vsuxseg7ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
- return vsuxseg8ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return vsuxseg2ei32_v_f16m1(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
- return vsuxseg3ei32_v_f16m1(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return vsuxseg4ei32_v_f16m1(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
- return vsuxseg5ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return vsuxseg6ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
- return vsuxseg7ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
- return vsuxseg8ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return vsuxseg2ei32_v_f16m2(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
- return vsuxseg3ei32_v_f16m2(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return vsuxseg4ei32_v_f16m2(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_f16m4 (_Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
- return vsuxseg2ei32_v_f16m4(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return vsuxseg2ei64_v_f16mf4(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return vsuxseg3ei64_v_f16mf4(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return vsuxseg4ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return vsuxseg5ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return vsuxseg6ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return vsuxseg7ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return vsuxseg8ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return vsuxseg2ei64_v_f16mf2(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return vsuxseg3ei64_v_f16mf2(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return vsuxseg4ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return vsuxseg5ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return vsuxseg6ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return vsuxseg7ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
- return vsuxseg8ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return vsuxseg2ei64_v_f16m1(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
- return vsuxseg3ei64_v_f16m1(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return vsuxseg4ei64_v_f16m1(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
- return vsuxseg5ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return vsuxseg6ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
- return vsuxseg7ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
- return vsuxseg8ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return vsuxseg2ei64_v_f16m2(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
- return vsuxseg3ei64_v_f16m2(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return vsuxseg4ei64_v_f16m2(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return vsuxseg2ei8_v_f16mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
+ return vsuxseg2ei8_v_f16mf4(base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return vsuxseg3ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
+ return vsuxseg3ei8_v_f16mf4(base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return vsuxseg4ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
+ return vsuxseg4ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return vsuxseg5ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
+ return vsuxseg5ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return vsuxseg6ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
+ return vsuxseg6ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return vsuxseg7ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
+ return vsuxseg7ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return vsuxseg8ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+ return vsuxseg8ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return vsuxseg2ei8_v_f16mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
+ return vsuxseg2ei8_v_f16mf2(base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return vsuxseg3ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
+ return vsuxseg3ei8_v_f16mf2(base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return vsuxseg4ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
+ return vsuxseg4ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return vsuxseg5ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
+ return vsuxseg5ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return vsuxseg6ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
+ return vsuxseg6ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return vsuxseg7ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
+ return vsuxseg7ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
- return vsuxseg8ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
+ return vsuxseg8ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return vsuxseg2ei8_v_f16m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
+ return vsuxseg2ei8_v_f16m1(base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
- return vsuxseg3ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
+ return vsuxseg3ei8_v_f16m1(base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return vsuxseg4ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
+ return vsuxseg4ei8_v_f16m1(base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
- return vsuxseg5ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
+ return vsuxseg5ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return vsuxseg6ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
+ return vsuxseg6ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
- return vsuxseg7ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
+ return vsuxseg7ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
- return vsuxseg8ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+ return vsuxseg8ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return vsuxseg2ei8_v_f16m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+ return vsuxseg2ei8_v_f16m2(base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
- return vsuxseg3ei8_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
+ return vsuxseg3ei8_v_f16m2(base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return vsuxseg4ei8_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
+ return vsuxseg4ei8_v_f16m2(base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
- return vsuxseg2ei8_v_f16m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_f16m4 (_Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
+ return vsuxseg2ei8_v_f16m4(base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return vsuxseg2ei16_v_f16mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
+ return vsuxseg2ei16_v_f16mf4(base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return vsuxseg3ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
+ return vsuxseg3ei16_v_f16mf4(base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return vsuxseg4ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
+ return vsuxseg4ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return vsuxseg5ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
+ return vsuxseg5ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return vsuxseg6ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
+ return vsuxseg6ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return vsuxseg7ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
+ return vsuxseg7ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return vsuxseg8ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+ return vsuxseg8ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return vsuxseg2ei16_v_f16mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
+ return vsuxseg2ei16_v_f16mf2(base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return vsuxseg3ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
+ return vsuxseg3ei16_v_f16mf2(base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return vsuxseg4ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
+ return vsuxseg4ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return vsuxseg5ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
+ return vsuxseg5ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return vsuxseg6ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
+ return vsuxseg6ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return vsuxseg7ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
+ return vsuxseg7ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-//
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return vsuxseg8ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { + return vsuxseg8ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return vsuxseg2ei16_v_f16m1_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { + return vsuxseg2ei16_v_f16m1(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return vsuxseg3ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { + return vsuxseg3ei16_v_f16m1(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, 
vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return vsuxseg4ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { + return vsuxseg4ei16_v_f16m1(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return vsuxseg5ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { + return vsuxseg5ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return vsuxseg6ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { + return vsuxseg6ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return vsuxseg7ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t 
v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { + return vsuxseg7ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return vsuxseg8ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { + return vsuxseg8ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return vsuxseg2ei16_v_f16m2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { + return vsuxseg2ei16_v_f16m2(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return vsuxseg3ei16_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { + return vsuxseg3ei16_v_f16m2(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i16.i64( [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return vsuxseg4ei16_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { + return vsuxseg4ei16_v_f16m2(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return vsuxseg2ei16_v_f16m4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_f16m4 (_Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { + return vsuxseg2ei16_v_f16m4(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return vsuxseg2ei32_v_f16mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { + return vsuxseg2ei32_v_f16mf4(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return vsuxseg3ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { + return vsuxseg3ei32_v_f16mf4(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return vsuxseg4ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { + return vsuxseg4ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return vsuxseg5ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { + return vsuxseg5ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return vsuxseg6ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { + return vsuxseg6ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, 
vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return vsuxseg7ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { + return vsuxseg7ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return vsuxseg8ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { + return vsuxseg8ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return vsuxseg2ei32_v_f16mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { + return vsuxseg2ei32_v_f16mf2(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return vsuxseg3ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { + return 
vsuxseg3ei32_v_f16mf2(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return vsuxseg4ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { + return vsuxseg4ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return vsuxseg5ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { + return vsuxseg5ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return vsuxseg6ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { + return vsuxseg6ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return vsuxseg7ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { + return vsuxseg7ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return vsuxseg8ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { + return vsuxseg8ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return vsuxseg2ei32_v_f16m1_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { + return vsuxseg2ei32_v_f16m1(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg3ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return vsuxseg3ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { + return vsuxseg3ei32_v_f16m1(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return vsuxseg4ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { + return vsuxseg4ei32_v_f16m1(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return vsuxseg5ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { + return vsuxseg5ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return vsuxseg6ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { + return vsuxseg6ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, 
vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return vsuxseg7ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { + return vsuxseg7ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return vsuxseg8ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { + return vsuxseg8ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return vsuxseg2ei32_v_f16m2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { + return vsuxseg2ei32_v_f16m2(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return vsuxseg3ei32_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { + return vsuxseg3ei32_v_f16m2(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return vsuxseg4ei32_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { + return vsuxseg4ei32_v_f16m2(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return vsuxseg2ei32_v_f16m4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_f16m4 (_Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { + return vsuxseg2ei32_v_f16m4(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return vsuxseg2ei64_v_f16mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { + return vsuxseg2ei64_v_f16mf4(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return vsuxseg3ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { + return vsuxseg3ei64_v_f16mf4(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return vsuxseg4ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { + return vsuxseg4ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return vsuxseg5ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { + return vsuxseg5ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, 
vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return vsuxseg6ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { + return vsuxseg6ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return vsuxseg7ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { + return vsuxseg7ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return vsuxseg8ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { + return vsuxseg8ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return vsuxseg2ei64_v_f16mf2_m(mask, base, 
bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { + return vsuxseg2ei64_v_f16mf2(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return vsuxseg3ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { + return vsuxseg3ei64_v_f16mf2(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return vsuxseg4ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { + return vsuxseg4ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return vsuxseg5ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { + return vsuxseg5ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return vsuxseg6ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { + return vsuxseg6ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return vsuxseg7ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { + return vsuxseg7ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return vsuxseg8ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { + return vsuxseg8ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return vsuxseg2ei64_v_f16m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
+ return vsuxseg2ei64_v_f16m1(base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
- return vsuxseg3ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
+ return vsuxseg3ei64_v_f16m1(base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return vsuxseg4ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
+ return vsuxseg4ei64_v_f16m1(base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
- return vsuxseg5ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
+ return vsuxseg5ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return vsuxseg6ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
+ return vsuxseg6ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
- return vsuxseg7ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
+ return vsuxseg7ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
- return vsuxseg8ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+ return vsuxseg8ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return vsuxseg2ei64_v_f16m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+ return vsuxseg2ei64_v_f16m2(base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei64_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
- return vsuxseg3ei64_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
+ return vsuxseg3ei64_v_f16m2(base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return vsuxseg4ei64_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
+ return vsuxseg4ei64_v_f16m2(base, bindex, v0, v1, v2, v3, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxseg_mask.c
copy from clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxseg.c
copy to clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxseg_mask.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxseg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxseg_mask.c
@@ -7,15151 +7,7132 @@
 #include 
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return vsuxseg2ei8_v_i8mf8(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
+ return vsuxseg2ei8_v_i8mf8_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return vsuxseg3ei8_v_i8mf8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
+ return vsuxseg3ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return vsuxseg4ei8_v_i8mf8(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
+ return vsuxseg4ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return vsuxseg5ei8_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
+ return vsuxseg5ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vsuxseg6ei8_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
+ return vsuxseg6ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vsuxseg7ei8_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
+ return vsuxseg7ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vsuxseg8ei8_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
+ return vsuxseg8ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vsuxseg2ei8_v_i8mf4(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
+ return vsuxseg2ei8_v_i8mf4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vsuxseg3ei8_v_i8mf4(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
+ return vsuxseg3ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vsuxseg4ei8_v_i8mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
+ return vsuxseg4ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vsuxseg5ei8_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
+ return vsuxseg5ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vsuxseg6ei8_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
+ return vsuxseg6ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vsuxseg7ei8_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
+ return vsuxseg7ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vsuxseg8ei8_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
+ return vsuxseg8ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return vsuxseg2ei8_v_i8mf2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
+ return vsuxseg2ei8_v_i8mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vsuxseg3ei8_v_i8mf2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
+ return vsuxseg3ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vsuxseg4ei8_v_i8mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
+ return vsuxseg4ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return vsuxseg5ei8_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
+ return vsuxseg5ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return vsuxseg6ei8_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
+ return vsuxseg6ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return vsuxseg7ei8_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
+ return vsuxseg7ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return vsuxseg8ei8_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
+ return vsuxseg8ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m1(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return vsuxseg2ei8_v_i8m1(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
+ return vsuxseg2ei8_v_i8m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8m1(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return vsuxseg3ei8_v_i8m1(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
+ return vsuxseg3ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8m1(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return vsuxseg4ei8_v_i8m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
+ return vsuxseg4ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8m1(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return vsuxseg5ei8_v_i8m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
+ return vsuxseg5ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8m1(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return vsuxseg6ei8_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
+ return vsuxseg6ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8m1(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
- return vsuxseg7ei8_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
+ return vsuxseg7ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8m1(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return vsuxseg8ei8_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
+ return vsuxseg8ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i8m2 (int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
- return vsuxseg2ei8_v_i8m2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
+ return vsuxseg2ei8_v_i8m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8m2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_i8m2 (int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
- return vsuxseg3ei8_v_i8m2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
+ return vsuxseg3ei8_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8m2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_i8m2 (int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
- return vsuxseg4ei8_v_i8m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
+ return vsuxseg4ei8_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m4(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i8m4 (int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) {
- return vsuxseg2ei8_v_i8m4(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_i8m4_m (vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) {
+ return vsuxseg2ei8_v_i8m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return vsuxseg2ei16_v_i8mf8(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
+ return vsuxseg2ei16_v_i8mf8_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return vsuxseg3ei16_v_i8mf8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
+ return vsuxseg3ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return vsuxseg4ei16_v_i8mf8(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
+ return vsuxseg4ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return vsuxseg5ei16_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
+ return vsuxseg5ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vsuxseg6ei16_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
+ return vsuxseg6ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vsuxseg7ei16_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
+ return vsuxseg7ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vsuxseg8ei16_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
+ return vsuxseg8ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vsuxseg2ei16_v_i8mf4(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
+ return vsuxseg2ei16_v_i8mf4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vsuxseg3ei16_v_i8mf4(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
+ return vsuxseg3ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vsuxseg4ei16_v_i8mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
+ return vsuxseg4ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vsuxseg5ei16_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
+ return vsuxseg5ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vsuxseg6ei16_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
+ return vsuxseg6ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vsuxseg7ei16_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
+ return vsuxseg7ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vsuxseg8ei16_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
+ return vsuxseg8ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return vsuxseg2ei16_v_i8mf2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
+ return vsuxseg2ei16_v_i8mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vsuxseg3ei16_v_i8mf2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
+ return vsuxseg3ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vsuxseg4ei16_v_i8mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
+ return vsuxseg4ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return vsuxseg5ei16_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
+ return vsuxseg5ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return vsuxseg6ei16_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
+ return vsuxseg6ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return vsuxseg7ei16_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
+ return vsuxseg7ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return vsuxseg8ei16_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
+ return vsuxseg8ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m1(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void 
test_vsuxseg2ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vsuxseg2ei16_v_i8m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { + return vsuxseg2ei16_v_i8m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8m1( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vsuxseg3ei16_v_i8m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { + return vsuxseg3ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8m1( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vsuxseg4ei16_v_i8m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { + return vsuxseg4ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8m1( +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsuxseg5ei16_v_i8m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { + return vsuxseg5ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8m1( +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsuxseg6ei16_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { + return vsuxseg6ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8m1( +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsuxseg7ei16_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { + return vsuxseg7ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8m1( +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vsuxseg8ei16_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { + return vsuxseg8ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m2( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i8m2 (int8_t *base, vuint16m4_t 
bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return vsuxseg2ei16_v_i8m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { + return vsuxseg2ei16_v_i8m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8m2( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i8m2 (int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return vsuxseg3ei16_v_i8m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { + return vsuxseg3ei16_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8m2( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i8m2 (int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return vsuxseg4ei16_v_i8m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { + return vsuxseg4ei16_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m4( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i8m4 (int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { - return vsuxseg2ei16_v_i8m4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_i8m4_m (vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { + return vsuxseg2ei16_v_i8m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf8( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return 
vsuxseg2ei32_v_i8mf8(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { + return vsuxseg2ei32_v_i8mf8_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf8( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsuxseg3ei32_v_i8mf8(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { + return vsuxseg3ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf8( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsuxseg4ei32_v_i8mf8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { + return vsuxseg4ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf8( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsuxseg5ei32_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { + return vsuxseg5ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf8( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vsuxseg6ei32_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { + return vsuxseg6ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf8( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsuxseg7ei32_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { + return vsuxseg7ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf8( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsuxseg8ei32_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { + return vsuxseg8ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf4( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, 
vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vsuxseg2ei32_v_i8mf4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { + return vsuxseg2ei32_v_i8mf4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf4( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsuxseg3ei32_v_i8mf4(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { + return vsuxseg3ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf4( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vsuxseg4ei32_v_i8mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { + return vsuxseg4ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf4( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsuxseg5ei32_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { + return vsuxseg5ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf4( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsuxseg6ei32_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { + return vsuxseg6ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf4( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsuxseg7ei32_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { + return vsuxseg7ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf4( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsuxseg8ei32_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { + return vsuxseg8ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf2( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // 
-void test_vsuxseg2ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsuxseg2ei32_v_i8mf2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { + return vsuxseg2ei32_v_i8mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf2( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vsuxseg3ei32_v_i8mf2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { + return vsuxseg3ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf2( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vsuxseg4ei32_v_i8mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { + return vsuxseg4ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf2( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vsuxseg5ei32_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { + return vsuxseg5ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf2( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsuxseg6ei32_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { + return vsuxseg6ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf2( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vsuxseg7ei32_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { + return vsuxseg7ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf2( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsuxseg8ei32_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { + return vsuxseg8ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8m1( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vsuxseg2ei32_v_i8m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { + return vsuxseg2ei32_v_i8m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8m1( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vsuxseg3ei32_v_i8m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { + return vsuxseg3ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8m1( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vsuxseg4ei32_v_i8m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { + return vsuxseg4ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8m1( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsuxseg5ei32_v_i8m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { + return vsuxseg5ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8m1( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsuxseg6ei32_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { + return vsuxseg6ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8m1( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsuxseg7ei32_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { + return vsuxseg7ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8m1( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vsuxseg8ei32_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { + return vsuxseg8ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8m2( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i8m2 
(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return vsuxseg2ei32_v_i8m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { + return vsuxseg2ei32_v_i8m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8m2( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i8m2 (int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return vsuxseg3ei32_v_i8m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { + return vsuxseg3ei32_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8m2( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i8m2 (int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return vsuxseg4ei32_v_i8m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { + return vsuxseg4ei32_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf8( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsuxseg2ei64_v_i8mf8(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { + return vsuxseg2ei64_v_i8mf8_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf8( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, 
vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsuxseg3ei64_v_i8mf8(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { + return vsuxseg3ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf8( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsuxseg4ei64_v_i8mf8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { + return vsuxseg4ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf8( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsuxseg5ei64_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { + return vsuxseg5ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf8( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vsuxseg6ei64_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { + return vsuxseg6ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf8( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsuxseg7ei64_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { + return vsuxseg7ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf8( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsuxseg8ei64_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { + return vsuxseg8ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf4( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vsuxseg2ei64_v_i8mf4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { + return vsuxseg2ei64_v_i8mf4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf4( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i8mf4 
(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsuxseg3ei64_v_i8mf4(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { + return vsuxseg3ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf4( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vsuxseg4ei64_v_i8mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { + return vsuxseg4ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf4( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsuxseg5ei64_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { + return vsuxseg5ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf4( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsuxseg6ei64_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { + return vsuxseg6ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf4( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf4_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsuxseg7ei64_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { + return vsuxseg7ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf4( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsuxseg8ei64_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { + return vsuxseg8ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf2( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsuxseg2ei64_v_i8mf2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { + return vsuxseg2ei64_v_i8mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf2( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void 
 //
-void test_vsuxseg3ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vsuxseg3ei64_v_i8mf2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
+ return vsuxseg3ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vsuxseg4ei64_v_i8mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
+ return vsuxseg4ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return vsuxseg5ei64_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
+ return vsuxseg5ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return vsuxseg6ei64_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
+ return vsuxseg6ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return vsuxseg7ei64_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
+ return vsuxseg7ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return vsuxseg8ei64_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
+ return vsuxseg8ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8m1(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return vsuxseg2ei64_v_i8m1(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
+ return vsuxseg2ei64_v_i8m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8m1(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return vsuxseg3ei64_v_i8m1(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
+ return vsuxseg3ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8m1(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return vsuxseg4ei64_v_i8m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
+ return vsuxseg4ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8m1(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return vsuxseg5ei64_v_i8m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
+ return vsuxseg5ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8m1(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return vsuxseg6ei64_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
+ return vsuxseg6ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8m1(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
- return vsuxseg7ei64_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
+ return vsuxseg7ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8m1(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return vsuxseg8ei64_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
+ return vsuxseg8ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return vsuxseg2ei8_v_i16mf4(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
+ return vsuxseg2ei8_v_i16mf4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
- return vsuxseg3ei8_v_i16mf4(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
+ return vsuxseg3ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return vsuxseg4ei8_v_i16mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
+ return vsuxseg4ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
- return vsuxseg5ei8_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
+ return vsuxseg5ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
- return vsuxseg6ei8_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
+ return vsuxseg6ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
- return vsuxseg7ei8_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
+ return vsuxseg7ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
- return vsuxseg8ei8_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
+ return vsuxseg8ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return vsuxseg2ei8_v_i16mf2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
+ return vsuxseg2ei8_v_i16mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
- return vsuxseg3ei8_v_i16mf2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
+ return vsuxseg3ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return vsuxseg4ei8_v_i16mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
+ return vsuxseg4ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
- return vsuxseg5ei8_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
+ return vsuxseg5ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
- return vsuxseg6ei8_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
+ return vsuxseg6ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
- return vsuxseg7ei8_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
+ return vsuxseg7ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
- return vsuxseg8ei8_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
+ return vsuxseg8ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return vsuxseg2ei8_v_i16m1(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
+ return vsuxseg2ei8_v_i16m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
- return vsuxseg3ei8_v_i16m1(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
+ return vsuxseg3ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
- return vsuxseg4ei8_v_i16m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
+ return vsuxseg4ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
- return vsuxseg5ei8_v_i16m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
+ return vsuxseg5ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return vsuxseg6ei8_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
+ return vsuxseg6ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
- return vsuxseg7ei8_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
+ return vsuxseg7ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
- return vsuxseg8ei8_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
+ return vsuxseg8ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i16m2 (int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return vsuxseg2ei8_v_i16m2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
+ return vsuxseg2ei8_v_i16m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16m2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_i16m2 (int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
- return vsuxseg3ei8_v_i16m2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
+ return vsuxseg3ei8_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16m2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_i16m2 (int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
- return vsuxseg4ei8_v_i16m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
+ return vsuxseg4ei8_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m4(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i16m4 (int16_t *base, vuint8m2_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
- return vsuxseg2ei8_v_i16m4(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_i16m4_m (vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
+ return vsuxseg2ei8_v_i16m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return vsuxseg2ei16_v_i16mf4(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
+ return vsuxseg2ei16_v_i16mf4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
- return vsuxseg3ei16_v_i16mf4(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
+ return vsuxseg3ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return vsuxseg4ei16_v_i16mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
+ return vsuxseg4ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
- return vsuxseg5ei16_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
+ return vsuxseg5ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
- return vsuxseg6ei16_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
+ return vsuxseg6ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
- return vsuxseg7ei16_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
+ return vsuxseg7ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
- return vsuxseg8ei16_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
+ return vsuxseg8ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return vsuxseg2ei16_v_i16mf2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
+ return vsuxseg2ei16_v_i16mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
- return vsuxseg3ei16_v_i16mf2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
+ return vsuxseg3ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return vsuxseg4ei16_v_i16mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
+ return vsuxseg4ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
- return vsuxseg5ei16_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
+ return vsuxseg5ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
- return vsuxseg6ei16_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
+ return vsuxseg6ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
- return vsuxseg7ei16_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
+ return vsuxseg7ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
- return vsuxseg8ei16_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
+ return vsuxseg8ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return vsuxseg2ei16_v_i16m1(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
+ return vsuxseg2ei16_v_i16m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
- return vsuxseg3ei16_v_i16m1(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
+ return vsuxseg3ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
- return vsuxseg4ei16_v_i16m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
+ return vsuxseg4ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
- return vsuxseg5ei16_v_i16m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
+ return vsuxseg5ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return vsuxseg6ei16_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
+ return vsuxseg6ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
- return vsuxseg7ei16_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
+ return vsuxseg7ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
- return vsuxseg8ei16_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
+ return vsuxseg8ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_i16m2 (int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return vsuxseg2ei16_v_i16m2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
+ return vsuxseg2ei16_v_i16m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16m2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei16_v_i16m2 (int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
- return vsuxseg3ei16_v_i16m2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
+ return vsuxseg3ei16_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16m2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei16_v_i16m2 (int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
- return vsuxseg4ei16_v_i16m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
+ return vsuxseg4ei16_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m4(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_i16m4 (int16_t *base, vuint16m4_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
- return vsuxseg2ei16_v_i16m4(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i16m4_m (vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
+ return vsuxseg2ei16_v_i16m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return vsuxseg2ei32_v_i16mf4(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
+ return vsuxseg2ei32_v_i16mf4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
- return vsuxseg3ei32_v_i16mf4(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
+ return vsuxseg3ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return vsuxseg4ei32_v_i16mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
+ return vsuxseg4ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
- return vsuxseg5ei32_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
+ return vsuxseg5ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsuxseg6ei32_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { + return vsuxseg6ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16mf4( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsuxseg7ei32_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { + return vsuxseg7ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16mf4( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsuxseg8ei32_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { + return vsuxseg8ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16mf2( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i32.i64( 
[[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsuxseg2ei32_v_i16mf2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { + return vsuxseg2ei32_v_i16mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16mf2( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsuxseg3ei32_v_i16mf2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { + return vsuxseg3ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16mf2( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsuxseg4ei32_v_i16mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { + return vsuxseg4ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16mf2( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsuxseg5ei32_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { + return vsuxseg5ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16mf2( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16mf2_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsuxseg6ei32_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { + return vsuxseg6ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16mf2( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsuxseg7ei32_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { + return vsuxseg7ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16mf2( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsuxseg8ei32_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { + return vsuxseg8ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m1( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsuxseg2ei32_v_i16m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { + return vsuxseg2ei32_v_i16m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16m1( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsuxseg3ei32_v_i16m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { + return vsuxseg3ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16m1( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsuxseg4ei32_v_i16m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { + return vsuxseg4ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16m1( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsuxseg5ei32_v_i16m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { + return vsuxseg5ei32_v_i16m1_m(mask, base, bindex, v0, v1, 
v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16m1( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsuxseg6ei32_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { + return vsuxseg6ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16m1( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsuxseg7ei32_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { + return vsuxseg7ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16m1( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsuxseg8ei32_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { + return vsuxseg8ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m2( +// CHECK-RV64-LABEL: 
@test_vsuxseg2ei32_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i16m2 (int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsuxseg2ei32_v_i16m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { + return vsuxseg2ei32_v_i16m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16m2( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i16m2 (int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsuxseg3ei32_v_i16m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { + return vsuxseg3ei32_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16m2( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i16m2 (int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsuxseg4ei32_v_i16m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { + return vsuxseg4ei32_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m4( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i16m4 (int16_t *base, vuint32m8_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return vsuxseg2ei32_v_i16m4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i16m4_m (vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { + return vsuxseg2ei32_v_i16m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16mf4( +// CHECK-RV64-LABEL: 
@test_vsuxseg2ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsuxseg2ei64_v_i16mf4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { + return vsuxseg2ei64_v_i16mf4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16mf4( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsuxseg3ei64_v_i16mf4(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { + return vsuxseg3ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16mf4( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsuxseg4ei64_v_i16mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { + return vsuxseg4ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16mf4( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsuxseg5ei64_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, 
vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { + return vsuxseg5ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16mf4( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsuxseg6ei64_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { + return vsuxseg6ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16mf4( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsuxseg7ei64_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { + return vsuxseg7ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16mf4( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsuxseg8ei64_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, 
vint16mf4_t v7, size_t vl) { + return vsuxseg8ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16mf2( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsuxseg2ei64_v_i16mf2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { + return vsuxseg2ei64_v_i16mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16mf2( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsuxseg3ei64_v_i16mf2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { + return vsuxseg3ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16mf2( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsuxseg4ei64_v_i16mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { + return vsuxseg4ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16mf2( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, 
vint16mf2_t v4, size_t vl) { - return vsuxseg5ei64_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { + return vsuxseg5ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16mf2( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsuxseg6ei64_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { + return vsuxseg6ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16mf2( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsuxseg7ei64_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { + return vsuxseg7ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16mf2( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsuxseg8ei64_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void 
test_vsuxseg8ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { + return vsuxseg8ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16m1( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsuxseg2ei64_v_i16m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { + return vsuxseg2ei64_v_i16m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16m1( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsuxseg3ei64_v_i16m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { + return vsuxseg3ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16m1( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsuxseg4ei64_v_i16m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { + return vsuxseg4ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16m1( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsuxseg5ei64_v_i16m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { + return vsuxseg5ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16m1( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsuxseg6ei64_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { + return vsuxseg6ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16m1( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsuxseg7ei64_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { + return vsuxseg7ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16m1( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, 
size_t vl) { - return vsuxseg8ei64_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { + return vsuxseg8ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16m2( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i16m2 (int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsuxseg2ei64_v_i16m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { + return vsuxseg2ei64_v_i16m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16m2( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i16m2 (int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsuxseg3ei64_v_i16m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { + return vsuxseg3ei64_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16m2( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i16m2 (int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsuxseg4ei64_v_i16m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { + return vsuxseg4ei64_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsuxseg2ei8_v_i32mf2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { + return vsuxseg2ei8_v_i32mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsuxseg3ei8_v_i32mf2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { + return vsuxseg3ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsuxseg4ei8_v_i32mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { + return vsuxseg4ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsuxseg5ei8_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { + return vsuxseg5ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return vsuxseg6ei8_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
+ return vsuxseg6ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
- return vsuxseg7ei8_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
+ return vsuxseg7ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return vsuxseg8ei8_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
+ return vsuxseg8ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return vsuxseg2ei8_v_i32m1(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
+ return vsuxseg2ei8_v_i32m1_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return vsuxseg3ei8_v_i32m1(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
+ return vsuxseg3ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return vsuxseg4ei8_v_i32m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
+ return vsuxseg4ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return vsuxseg5ei8_v_i32m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
+ return vsuxseg5ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return vsuxseg6ei8_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
+ return vsuxseg6ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
- return vsuxseg7ei8_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
+ return vsuxseg7ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return vsuxseg8ei8_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
+ return vsuxseg8ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return vsuxseg2ei8_v_i32m2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
+ return vsuxseg2ei8_v_i32m2_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32m2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return vsuxseg3ei8_v_i32m2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
+ return vsuxseg3ei8_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32m2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return vsuxseg4ei8_v_i32m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
+ return vsuxseg4ei8_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m4(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return vsuxseg2ei8_v_i32m4(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
+ return vsuxseg2ei8_v_i32m4_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return vsuxseg2ei16_v_i32mf2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
+ return vsuxseg2ei16_v_i32mf2_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return vsuxseg3ei16_v_i32mf2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
+ return vsuxseg3ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return vsuxseg4ei16_v_i32mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
+ return vsuxseg4ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return vsuxseg5ei16_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
+ return vsuxseg5ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return vsuxseg6ei16_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
+ return vsuxseg6ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
- return vsuxseg7ei16_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
+ return vsuxseg7ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return vsuxseg8ei16_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
+ return vsuxseg8ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return vsuxseg2ei16_v_i32m1(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
+ return vsuxseg2ei16_v_i32m1_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return vsuxseg3ei16_v_i32m1(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
+ return vsuxseg3ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return vsuxseg4ei16_v_i32m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
+ return vsuxseg4ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return vsuxseg5ei16_v_i32m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
+ return vsuxseg5ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return vsuxseg6ei16_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
+ return vsuxseg6ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
- return vsuxseg7ei16_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
+ return vsuxseg7ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return vsuxseg8ei16_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
+ return vsuxseg8ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return vsuxseg2ei16_v_i32m2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
+ return vsuxseg2ei16_v_i32m2_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32m2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return vsuxseg3ei16_v_i32m2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
+ return vsuxseg3ei16_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32m2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return vsuxseg4ei16_v_i32m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
+ return vsuxseg4ei16_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m4(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return vsuxseg2ei16_v_i32m4(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
+ return vsuxseg2ei16_v_i32m4_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return vsuxseg2ei32_v_i32mf2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
+ return vsuxseg2ei32_v_i32mf2_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return vsuxseg3ei32_v_i32mf2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
+ return vsuxseg3ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return vsuxseg4ei32_v_i32mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
+ return vsuxseg4ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return vsuxseg5ei32_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
+ return vsuxseg5ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return vsuxseg6ei32_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
+ return vsuxseg6ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
- return vsuxseg7ei32_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
+ return vsuxseg7ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return vsuxseg8ei32_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
+ return vsuxseg8ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return vsuxseg2ei32_v_i32m1(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
+ return vsuxseg2ei32_v_i32m1_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return vsuxseg3ei32_v_i32m1(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
+ return vsuxseg3ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return vsuxseg4ei32_v_i32m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
+ return vsuxseg4ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return vsuxseg5ei32_v_i32m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
+ return vsuxseg5ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return vsuxseg6ei32_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
+ return vsuxseg6ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
- return vsuxseg7ei32_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
+ return vsuxseg7ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return vsuxseg8ei32_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
+ return vsuxseg8ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return vsuxseg2ei32_v_i32m2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
+ return vsuxseg2ei32_v_i32m2_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32m2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return vsuxseg3ei32_v_i32m2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
+ return vsuxseg3ei32_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32m2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return vsuxseg4ei32_v_i32m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
+ return vsuxseg4ei32_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m4(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return vsuxseg2ei32_v_i32m4(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
+ return vsuxseg2ei32_v_i32m4_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return vsuxseg2ei64_v_i32mf2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
+ return vsuxseg2ei64_v_i32mf2_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return vsuxseg3ei64_v_i32mf2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
+ return vsuxseg3ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return vsuxseg4ei64_v_i32mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
+ return vsuxseg4ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return vsuxseg5ei64_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
+ return vsuxseg5ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return vsuxseg6ei64_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
+ return vsuxseg6ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
- return vsuxseg7ei64_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
+ return vsuxseg7ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return vsuxseg8ei64_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
+ return vsuxseg8ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return vsuxseg2ei64_v_i32m1(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
+ return vsuxseg2ei64_v_i32m1_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return vsuxseg3ei64_v_i32m1(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
+ return vsuxseg3ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return vsuxseg4ei64_v_i32m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
+ return vsuxseg4ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return vsuxseg5ei64_v_i32m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
+ return vsuxseg5ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return vsuxseg6ei64_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
+ return vsuxseg6ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
- return vsuxseg7ei64_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
+ return vsuxseg7ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return vsuxseg8ei64_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
+ return vsuxseg8ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return vsuxseg2ei64_v_i32m2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
+ return vsuxseg2ei64_v_i32m2_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32m2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return vsuxseg3ei64_v_i32m2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
+ return vsuxseg3ei64_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32m2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return vsuxseg4ei64_v_i32m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
+ return vsuxseg4ei64_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m4(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return vsuxseg2ei64_v_i32m4(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
+ return vsuxseg2ei64_v_i32m4_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m1(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vsuxseg2ei8_v_i64m1(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
+ return vsuxseg2ei8_v_i64m1_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i64m1(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return vsuxseg3ei8_v_i64m1(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
+ return vsuxseg3ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i64m1(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return vsuxseg4ei8_v_i64m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
+ return vsuxseg4ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i64m1(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]],
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsuxseg5ei8_v_i64m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { + return vsuxseg5ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i64m1( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsuxseg6ei8_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { + return vsuxseg6ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i64m1( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsuxseg7ei8_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { + return vsuxseg7ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i64m1( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
void // -void test_vsuxseg8ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vsuxseg8ei8_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { + return vsuxseg8ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m2( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsuxseg2ei8_v_i64m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { + return vsuxseg2ei8_v_i64m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i64m2( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsuxseg3ei8_v_i64m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { + return vsuxseg3ei8_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i64m2( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsuxseg4ei8_v_i64m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { + return vsuxseg4ei8_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m4( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsuxseg2ei8_v_i64m4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { + return vsuxseg2ei8_v_i64m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m1( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsuxseg2ei16_v_i64m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { + return vsuxseg2ei16_v_i64m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i64m1( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsuxseg3ei16_v_i64m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { + return vsuxseg3ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i64m1( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsuxseg4ei16_v_i64m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { + return vsuxseg4ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i64m1( +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsuxseg5ei16_v_i64m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { + return vsuxseg5ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i64m1( +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsuxseg6ei16_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { + return vsuxseg6ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i64m1( +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsuxseg7ei16_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { + return vsuxseg7ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i64m1( +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vsuxseg8ei16_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { + return vsuxseg8ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m2( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsuxseg2ei16_v_i64m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { + return vsuxseg2ei16_v_i64m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i64m2( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsuxseg3ei16_v_i64m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { + return vsuxseg3ei16_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i64m2( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsuxseg4ei16_v_i64m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { + return vsuxseg4ei16_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m4( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsuxseg2ei16_v_i64m4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { + return vsuxseg2ei16_v_i64m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m1( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsuxseg2ei32_v_i64m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { + return vsuxseg2ei32_v_i64m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i64m1( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsuxseg3ei32_v_i64m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { + return vsuxseg3ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i64m1( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsuxseg4ei32_v_i64m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { + return vsuxseg4ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i64m1( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsuxseg5ei32_v_i64m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { + return vsuxseg5ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i64m1( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsuxseg6ei32_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { + return vsuxseg6ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i64m1( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsuxseg7ei32_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { + return vsuxseg7ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i64m1( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vsuxseg8ei32_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { + return vsuxseg8ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m2( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsuxseg2ei32_v_i64m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { + return vsuxseg2ei32_v_i64m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i64m2( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsuxseg3ei32_v_i64m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { + return vsuxseg3ei32_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i64m2( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsuxseg4ei32_v_i64m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { + return vsuxseg4ei32_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m4( +// CHECK-RV64-LABEL: 
@test_vsuxseg2ei32_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsuxseg2ei32_v_i64m4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { + return vsuxseg2ei32_v_i64m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m1( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsuxseg2ei64_v_i64m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { + return vsuxseg2ei64_v_i64m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i64m1( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsuxseg3ei64_v_i64m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { + return vsuxseg3ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i64m1( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsuxseg4ei64_v_i64m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { + return vsuxseg4ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i64m1( +// CHECK-RV64-LABEL: 
@test_vsuxseg5ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsuxseg5ei64_v_i64m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { + return vsuxseg5ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i64m1( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsuxseg6ei64_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { + return vsuxseg6ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i64m1( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsuxseg7ei64_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { + return vsuxseg7ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i64m1( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vsuxseg8ei64_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { + return vsuxseg8ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m2( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsuxseg2ei64_v_i64m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { + return vsuxseg2ei64_v_i64m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i64m2( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsuxseg3ei64_v_i64m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { + return vsuxseg3ei64_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i64m2( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsuxseg4ei64_v_i64m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { + return vsuxseg4ei64_v_i64m2_m(mask, base, bindex, v0, v1, v2, 
v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m4( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsuxseg2ei64_v_i64m4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { + return vsuxseg2ei64_v_i64m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf8( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsuxseg2ei8_v_u8mf8(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { + return vsuxseg2ei8_v_u8mf8_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf8( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsuxseg3ei8_v_u8mf8(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { + return vsuxseg3ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf8( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsuxseg4ei8_v_u8mf8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { + return vsuxseg4ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// 
CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf8( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsuxseg5ei8_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { + return vsuxseg5ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf8( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsuxseg6ei8_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { + return vsuxseg6ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf8( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsuxseg7ei8_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { + return vsuxseg7ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf8( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], 
i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return vsuxseg8ei8_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
+ return vsuxseg8ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return vsuxseg2ei8_v_u8mf4(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
+ return vsuxseg2ei8_v_u8mf4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return vsuxseg3ei8_v_u8mf4(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
+ return vsuxseg3ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return vsuxseg4ei8_v_u8mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
+ return vsuxseg4ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
- return vsuxseg5ei8_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
+ return vsuxseg5ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return vsuxseg6ei8_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
+ return vsuxseg6ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
- return vsuxseg7ei8_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
+ return vsuxseg7ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return vsuxseg8ei8_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
+ return vsuxseg8ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return vsuxseg2ei8_v_u8mf2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
+ return vsuxseg2ei8_v_u8mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return vsuxseg3ei8_v_u8mf2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
+ return vsuxseg3ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
- return vsuxseg4ei8_v_u8mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
+ return vsuxseg4ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
- return vsuxseg5ei8_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
+ return vsuxseg5ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return vsuxseg6ei8_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
+ return vsuxseg6ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
- return vsuxseg7ei8_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
+ return vsuxseg7ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
- return vsuxseg8ei8_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
+ return vsuxseg8ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m1(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return vsuxseg2ei8_v_u8m1(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
+ return vsuxseg2ei8_v_u8m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8m1(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
- return vsuxseg3ei8_v_u8m1(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
+ return vsuxseg3ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8m1(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
- return vsuxseg4ei8_v_u8m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
+ return vsuxseg4ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8m1(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
- return vsuxseg5ei8_v_u8m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
+ return vsuxseg5ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8m1(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
- return vsuxseg6ei8_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
+ return vsuxseg6ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8m1(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
- return vsuxseg7ei8_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
+ return vsuxseg7ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8m1(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
- return vsuxseg8ei8_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
+ return vsuxseg8ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_u8m2 (uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
- return vsuxseg2ei8_v_u8m2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
+ return vsuxseg2ei8_v_u8m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8m2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_u8m2 (uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
- return vsuxseg3ei8_v_u8m2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
+ return vsuxseg3ei8_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8m2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_u8m2 (uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
- return vsuxseg4ei8_v_u8m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
+ return vsuxseg4ei8_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m4(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_u8m4 (uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
- return vsuxseg2ei8_v_u8m4(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
+ return vsuxseg2ei8_v_u8m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return vsuxseg2ei16_v_u8mf8(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
+ return vsuxseg2ei16_v_u8mf8_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
- return vsuxseg3ei16_v_u8mf8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
+ return vsuxseg3ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return vsuxseg4ei16_v_u8mf8(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
+ return vsuxseg4ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
- return vsuxseg5ei16_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
+ return vsuxseg5ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return vsuxseg6ei16_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
+ return vsuxseg6ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
- return vsuxseg7ei16_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
+ return vsuxseg7ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return vsuxseg8ei16_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
+ return vsuxseg8ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return vsuxseg2ei16_v_u8mf4(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
+ return vsuxseg2ei16_v_u8mf4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return vsuxseg3ei16_v_u8mf4(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
+ return vsuxseg3ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return vsuxseg4ei16_v_u8mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
+ return vsuxseg4ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
- return vsuxseg5ei16_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
+ return vsuxseg5ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return vsuxseg6ei16_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
+ return vsuxseg6ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
- return vsuxseg7ei16_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
+ return vsuxseg7ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return vsuxseg8ei16_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
+ return vsuxseg8ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return vsuxseg2ei16_v_u8mf2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
+ return vsuxseg2ei16_v_u8mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return vsuxseg3ei16_v_u8mf2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
+ return vsuxseg3ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
- return vsuxseg4ei16_v_u8mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
+ return vsuxseg4ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
- return vsuxseg5ei16_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
+ return vsuxseg5ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return vsuxseg6ei16_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
+ return vsuxseg6ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
- return vsuxseg7ei16_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
+ return vsuxseg7ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
- return vsuxseg8ei16_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
+ return vsuxseg8ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8m1(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return vsuxseg2ei16_v_u8m1(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
+ return vsuxseg2ei16_v_u8m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8m1(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
- return vsuxseg3ei16_v_u8m1(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
+ return vsuxseg3ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8m1(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
- return vsuxseg4ei16_v_u8m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
+ return vsuxseg4ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8m1(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
- return vsuxseg5ei16_v_u8m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
+ return vsuxseg5ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8m1(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
- return vsuxseg6ei16_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
+ return vsuxseg6ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8m1(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
- return vsuxseg7ei16_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
+ return vsuxseg7ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8m1(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
- return vsuxseg8ei16_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
+ return vsuxseg8ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8m2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_u8m2 (uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
- return vsuxseg2ei16_v_u8m2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
+ return vsuxseg2ei16_v_u8m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8m2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei16_v_u8m2 (uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
- return vsuxseg3ei16_v_u8m2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
+ return vsuxseg3ei16_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8m2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei16_v_u8m2 (uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
- return vsuxseg4ei16_v_u8m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
+ return vsuxseg4ei16_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8m4(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_u8m4 (uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
- return vsuxseg2ei16_v_u8m4(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
+ return vsuxseg2ei16_v_u8m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return vsuxseg2ei32_v_u8mf8(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
+ return vsuxseg2ei32_v_u8mf8_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
- return vsuxseg3ei32_v_u8mf8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
+ return vsuxseg3ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return vsuxseg4ei32_v_u8mf8(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
+ return vsuxseg4ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
- return vsuxseg5ei32_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
+ return vsuxseg5ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return vsuxseg6ei32_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
+ return vsuxseg6ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
- return vsuxseg7ei32_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
+ return vsuxseg7ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf8(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return vsuxseg8ei32_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
+ return vsuxseg8ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return vsuxseg2ei32_v_u8mf4(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
+ return vsuxseg2ei32_v_u8mf4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return vsuxseg3ei32_v_u8mf4(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
+ return vsuxseg3ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return vsuxseg4ei32_v_u8mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
+ return vsuxseg4ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
- return vsuxseg5ei32_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
+ return vsuxseg5ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return vsuxseg6ei32_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
+ return vsuxseg6ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
- return vsuxseg7ei32_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
+ return vsuxseg7ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return vsuxseg8ei32_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
+ return vsuxseg8ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return vsuxseg2ei32_v_u8mf2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
+ return vsuxseg2ei32_v_u8mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return vsuxseg3ei32_v_u8mf2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
+ return vsuxseg3ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i32.i64( [[V0:%.*]],
[[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsuxseg4ei32_v_u8mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { + return vsuxseg4ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf2( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsuxseg5ei32_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { + return vsuxseg5ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf2( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsuxseg6ei32_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { + return vsuxseg6ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf2( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t 
v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsuxseg7ei32_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { + return vsuxseg7ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf2( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsuxseg8ei32_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { + return vsuxseg8ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8m1( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsuxseg2ei32_v_u8m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { + return vsuxseg2ei32_v_u8m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8m1( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsuxseg3ei32_v_u8m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { + return vsuxseg3ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8m1( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8m1_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsuxseg4ei32_v_u8m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { + return vsuxseg4ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8m1( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsuxseg5ei32_v_u8m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { + return vsuxseg5ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8m1( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsuxseg6ei32_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { + return vsuxseg6ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8m1( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u8m1 (uint8_t 
*base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsuxseg7ei32_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { + return vsuxseg7ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8m1( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsuxseg8ei32_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { + return vsuxseg8ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8m2( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u8m2 (uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return vsuxseg2ei32_v_u8m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { + return vsuxseg2ei32_v_u8m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8m2( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u8m2 (uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return vsuxseg3ei32_v_u8m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { + return vsuxseg3ei32_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8m2( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8m2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u8m2 (uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return vsuxseg4ei32_v_u8m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { + return vsuxseg4ei32_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf8( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsuxseg2ei64_v_u8mf8(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { + return vsuxseg2ei64_v_u8mf8_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf8( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsuxseg3ei64_v_u8mf8(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { + return vsuxseg3ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf8( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsuxseg4ei64_v_u8mf8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { + return vsuxseg4ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, 
v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf8( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsuxseg5ei64_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { + return vsuxseg5ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8mf8( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsuxseg6ei64_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { + return vsuxseg6ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8mf8( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsuxseg7ei64_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { + return vsuxseg7ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf8( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], 
[[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsuxseg8ei64_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { + return vsuxseg8ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf4( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsuxseg2ei64_v_u8mf4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { + return vsuxseg2ei64_v_u8mf4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf4( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsuxseg3ei64_v_u8mf4(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { + return vsuxseg3ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf4( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsuxseg4ei64_v_u8mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, 
vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { + return vsuxseg4ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf4( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsuxseg5ei64_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { + return vsuxseg5ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8mf4( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsuxseg6ei64_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { + return vsuxseg6ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8mf4( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsuxseg7ei64_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { + return vsuxseg7ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf4( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsuxseg8ei64_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { + return vsuxseg8ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf2( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsuxseg2ei64_v_u8mf2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { + return vsuxseg2ei64_v_u8mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf2( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vsuxseg3ei64_v_u8mf2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { + return vsuxseg3ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf2( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsuxseg4ei64_v_u8mf2(base, bindex, v0, 
v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { + return vsuxseg4ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf2( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsuxseg5ei64_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { + return vsuxseg5ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8mf2( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsuxseg6ei64_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { + return vsuxseg6ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8mf2( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsuxseg7ei64_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { + return vsuxseg7ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// 
CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf2( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsuxseg8ei64_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { + return vsuxseg8ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8m1( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsuxseg2ei64_v_u8m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { + return vsuxseg2ei64_v_u8m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8m1( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsuxseg3ei64_v_u8m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { + return vsuxseg3ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8m1( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, 
vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsuxseg4ei64_v_u8m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { + return vsuxseg4ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8m1( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsuxseg5ei64_v_u8m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { + return vsuxseg5ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8m1( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsuxseg6ei64_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { + return vsuxseg6ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8m1( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsuxseg7ei64_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { + return vsuxseg7ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, 
v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8m1( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsuxseg8ei64_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { + return vsuxseg8ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16mf4( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return vsuxseg2ei8_v_u16mf4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { + return vsuxseg2ei8_v_u16mf4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16mf4( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vsuxseg3ei8_v_u16mf4(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { + return vsuxseg3ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16mf4( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u16mf4 (uint16_t *base, 
vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
-  return vsuxseg4ei8_v_u16mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
+  return vsuxseg4ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
-  return vsuxseg5ei8_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
+  return vsuxseg5ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg6ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
-  return vsuxseg6ei8_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
+  return vsuxseg6ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
-  return vsuxseg7ei8_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
+  return vsuxseg7ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
-  return vsuxseg8ei8_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
+  return vsuxseg8ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
-  return vsuxseg2ei8_v_u16mf2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
+  return vsuxseg2ei8_v_u16mf2_m(mask, base, bindex, v0, v1, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
-  return vsuxseg3ei8_v_u16mf2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
+  return vsuxseg3ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
-  return vsuxseg4ei8_v_u16mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
+  return vsuxseg4ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
-  return vsuxseg5ei8_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
+  return vsuxseg5ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg6ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
-  return vsuxseg6ei8_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
+  return vsuxseg6ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
-  return vsuxseg7ei8_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
+  return vsuxseg7ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
-  return vsuxseg8ei8_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
+  return vsuxseg8ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
-  return vsuxseg2ei8_v_u16m1(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
+  return vsuxseg2ei8_v_u16m1_m(mask, base, bindex, v0, v1, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
-  return vsuxseg3ei8_v_u16m1(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
+  return vsuxseg3ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
-  return vsuxseg4ei8_v_u16m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
+  return vsuxseg4ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
-  return vsuxseg5ei8_v_u16m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
+  return vsuxseg5ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg6ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
-  return vsuxseg6ei8_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
+  return vsuxseg6ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
-  return vsuxseg7ei8_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
+  return vsuxseg7ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
-  return vsuxseg8ei8_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
+  return vsuxseg8ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei8_v_u16m2 (uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
-  return vsuxseg2ei8_v_u16m2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
+  return vsuxseg2ei8_v_u16m2_m(mask, base, bindex, v0, v1, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16m2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei8_v_u16m2 (uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
-  return vsuxseg3ei8_v_u16m2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
+  return vsuxseg3ei8_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16m2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei8_v_u16m2 (uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
-  return vsuxseg4ei8_v_u16m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
+  return vsuxseg4ei8_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m4(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei8_v_u16m4 (uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
-  return vsuxseg2ei8_v_u16m4(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
+  return vsuxseg2ei8_v_u16m4_m(mask, base, bindex, v0, v1, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
-  return vsuxseg2ei16_v_u16mf4(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
+  return vsuxseg2ei16_v_u16mf4_m(mask, base, bindex, v0, v1, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
-  return vsuxseg3ei16_v_u16mf4(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
+  return vsuxseg3ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
-  return vsuxseg4ei16_v_u16mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
+  return vsuxseg4ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
-  return vsuxseg5ei16_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
+  return vsuxseg5ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg6ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
-  return vsuxseg6ei16_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
+  return vsuxseg6ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
-  return vsuxseg7ei16_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
+  return vsuxseg7ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
-  return vsuxseg8ei16_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
+  return vsuxseg8ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
-  return vsuxseg2ei16_v_u16mf2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
+  return vsuxseg2ei16_v_u16mf2_m(mask, base, bindex, v0, v1, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
-  return vsuxseg3ei16_v_u16mf2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
+  return vsuxseg3ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
-  return vsuxseg4ei16_v_u16mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
+  return vsuxseg4ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
-  return vsuxseg5ei16_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
+  return vsuxseg5ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg6ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
-  return vsuxseg6ei16_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
+  return vsuxseg6ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
-  return vsuxseg7ei16_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
+  return vsuxseg7ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
-  return vsuxseg8ei16_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
+  return vsuxseg8ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
-  return vsuxseg2ei16_v_u16m1(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
+  return vsuxseg2ei16_v_u16m1_m(mask, base, bindex, v0, v1, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
-  return vsuxseg3ei16_v_u16m1(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
+  return vsuxseg3ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
-  return vsuxseg4ei16_v_u16m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
+  return vsuxseg4ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
-  return vsuxseg5ei16_v_u16m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
+  return vsuxseg5ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg6ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
-  return vsuxseg6ei16_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
+  return vsuxseg6ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
-  return vsuxseg7ei16_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
+  return vsuxseg7ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
-  return vsuxseg8ei16_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
+  return vsuxseg8ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_u16m2 (uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
-  return vsuxseg2ei16_v_u16m2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
+  return vsuxseg2ei16_v_u16m2_m(mask, base, bindex, v0, v1, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16m2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_u16m2 (uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
-  return vsuxseg3ei16_v_u16m2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
+  return vsuxseg3ei16_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16m2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei16_v_u16m2 (uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
-  return vsuxseg4ei16_v_u16m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
+  return vsuxseg4ei16_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m4(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_u16m4 (uint16_t *base, vuint16m4_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
-  return vsuxseg2ei16_v_u16m4(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
+  return vsuxseg2ei16_v_u16m4_m(mask, base, bindex, v0, v1, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
-  return vsuxseg2ei32_v_u16mf4(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
+  return vsuxseg2ei32_v_u16mf4_m(mask, base, bindex, v0, v1, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
-  return vsuxseg3ei32_v_u16mf4(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
+  return vsuxseg3ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
-  return vsuxseg4ei32_v_u16mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
+  return vsuxseg4ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
-  return vsuxseg5ei32_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
+  return vsuxseg5ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg6ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
-  return vsuxseg6ei32_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
+  return vsuxseg6ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
-  return vsuxseg7ei32_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
+  return vsuxseg7ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
-  return vsuxseg8ei32_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
+  return vsuxseg8ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
-  return vsuxseg2ei32_v_u16mf2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
+  return vsuxseg2ei32_v_u16mf2_m(mask, base, bindex, v0, v1, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
-  return vsuxseg3ei32_v_u16mf2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
+  return vsuxseg3ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
-  return vsuxseg4ei32_v_u16mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
+  return vsuxseg4ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
-  return vsuxseg5ei32_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
+  return vsuxseg5ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg6ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
-  return vsuxseg6ei32_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
+  return vsuxseg6ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
-  return vsuxseg7ei32_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
+  return vsuxseg7ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
-  return vsuxseg8ei32_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
+  return vsuxseg8ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
-  return vsuxseg2ei32_v_u16m1(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
+  return vsuxseg2ei32_v_u16m1_m(mask, base, bindex, v0, v1, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
-  return vsuxseg3ei32_v_u16m1(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
+  return vsuxseg3ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
-  return vsuxseg4ei32_v_u16m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
+  return vsuxseg4ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
-  return vsuxseg5ei32_v_u16m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
+  return vsuxseg5ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg6ei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
-  return vsuxseg6ei32_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
+  return vsuxseg6ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
-  return vsuxseg7ei32_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
+  return vsuxseg7ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
-  return vsuxseg8ei32_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
+  return vsuxseg8ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u16m2 (uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
-  return vsuxseg2ei32_v_u16m2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
+  return vsuxseg2ei32_v_u16m2_m(mask, base, bindex, v0, v1, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16m2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei32_v_u16m2 (uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
-  return vsuxseg3ei32_v_u16m2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
+  return vsuxseg3ei32_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16m2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei32_v_u16m2 (uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
-  return vsuxseg4ei32_v_u16m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
+  return vsuxseg4ei32_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m4(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u16m4 (uint16_t *base, vuint32m8_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
-  return vsuxseg2ei32_v_u16m4(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
+  return vsuxseg2ei32_v_u16m4_m(mask, base, bindex, v0, v1, vl);
}

-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-//
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return vsuxseg2ei64_v_u16mf4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { + return vsuxseg2ei64_v_u16mf4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16mf4( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vsuxseg3ei64_v_u16mf4(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { + return vsuxseg3ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16mf4( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return vsuxseg4ei64_v_u16mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { + return vsuxseg4ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16mf4( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return vsuxseg5ei64_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t 
v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { + return vsuxseg5ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16mf4( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return vsuxseg6ei64_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { + return vsuxseg6ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16mf4( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vsuxseg7ei64_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { + return vsuxseg7ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16mf4( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vsuxseg8ei64_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, 
vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { + return vsuxseg8ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16mf2( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return vsuxseg2ei64_v_u16mf2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { + return vsuxseg2ei64_v_u16mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16mf2( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return vsuxseg3ei64_v_u16mf2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { + return vsuxseg3ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16mf2( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return vsuxseg4ei64_v_u16mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { + return vsuxseg4ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16mf2( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t 
v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vsuxseg5ei64_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { + return vsuxseg5ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16mf2( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vsuxseg6ei64_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { + return vsuxseg6ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16mf2( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vsuxseg7ei64_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { + return vsuxseg7ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16mf2( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - 
return vsuxseg8ei64_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { + return vsuxseg8ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16m1( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsuxseg2ei64_v_u16m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { + return vsuxseg2ei64_v_u16m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16m1( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsuxseg3ei64_v_u16m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { + return vsuxseg3ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16m1( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsuxseg4ei64_v_u16m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { + return vsuxseg4ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16m1( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsuxseg5ei64_v_u16m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { + return vsuxseg5ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16m1( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsuxseg6ei64_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { + return vsuxseg6ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16m1( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsuxseg7ei64_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { + return vsuxseg7ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16m1( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u16m1 (uint16_t 
*base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsuxseg8ei64_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { + return vsuxseg8ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16m2( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u16m2 (uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsuxseg2ei64_v_u16m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { + return vsuxseg2ei64_v_u16m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16m2( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u16m2 (uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsuxseg3ei64_v_u16m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { + return vsuxseg3ei64_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16m2( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u16m2 (uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsuxseg4ei64_v_u16m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { + return vsuxseg4ei64_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsuxseg2ei8_v_u32mf2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { + return vsuxseg2ei8_v_u32mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsuxseg3ei8_v_u32mf2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { + return vsuxseg3ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsuxseg4ei8_v_u32mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { + return vsuxseg4ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsuxseg5ei8_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { + return vsuxseg5ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: 
@test_vsuxseg6ei8_v_u32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsuxseg6ei8_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { + return vsuxseg6ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsuxseg7ei8_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { + return vsuxseg7ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsuxseg8ei8_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { + return vsuxseg8ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: 
@test_vsuxseg2ei8_v_u32m1( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsuxseg2ei8_v_u32m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { + return vsuxseg2ei8_v_u32m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32m1( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsuxseg3ei8_v_u32m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { + return vsuxseg3ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32m1( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsuxseg4ei8_v_u32m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { + return vsuxseg4ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u32m1( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsuxseg5ei8_v_u32m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t 
v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { + return vsuxseg5ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u32m1( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsuxseg6ei8_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { + return vsuxseg6ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u32m1( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsuxseg7ei8_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { + return vsuxseg7ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u32m1( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsuxseg8ei8_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { + return 
vsuxseg8ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m2( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsuxseg2ei8_v_u32m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { + return vsuxseg2ei8_v_u32m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32m2( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsuxseg3ei8_v_u32m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { + return vsuxseg3ei8_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32m2( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsuxseg4ei8_v_u32m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { + return vsuxseg4ei8_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m4( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsuxseg2ei8_v_u32m4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) 
{ + return vsuxseg2ei8_v_u32m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsuxseg2ei16_v_u32mf2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { + return vsuxseg2ei16_v_u32mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsuxseg3ei16_v_u32mf2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { + return vsuxseg3ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsuxseg4ei16_v_u32mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { + return vsuxseg4ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t 
vl) {
- return vsuxseg5ei16_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
+ return vsuxseg5ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsuxseg6ei16_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
+ return vsuxseg6ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsuxseg7ei16_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
+ return vsuxseg7ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsuxseg8ei16_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
+ return vsuxseg8ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsuxseg2ei16_v_u32m1(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
+ return vsuxseg2ei16_v_u32m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsuxseg3ei16_v_u32m1(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
+ return vsuxseg3ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vsuxseg4ei16_v_u32m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
+ return vsuxseg4ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsuxseg5ei16_v_u32m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
+ return vsuxseg5ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsuxseg6ei16_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
+ return vsuxseg6ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsuxseg7ei16_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
+ return vsuxseg7ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsuxseg8ei16_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
+ return vsuxseg8ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsuxseg2ei16_v_u32m2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
+ return vsuxseg2ei16_v_u32m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32m2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vsuxseg3ei16_v_u32m2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
+ return vsuxseg3ei16_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32m2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vsuxseg4ei16_v_u32m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
+ return vsuxseg4ei16_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m4(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return vsuxseg2ei16_v_u32m4(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
+ return vsuxseg2ei16_v_u32m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsuxseg2ei32_v_u32mf2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
+ return vsuxseg2ei32_v_u32mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsuxseg3ei32_v_u32mf2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
+ return vsuxseg3ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsuxseg4ei32_v_u32mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
+ return vsuxseg4ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsuxseg5ei32_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
+ return vsuxseg5ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsuxseg6ei32_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
+ return vsuxseg6ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsuxseg7ei32_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
+ return vsuxseg7ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsuxseg8ei32_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
+ return vsuxseg8ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsuxseg2ei32_v_u32m1(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
+ return vsuxseg2ei32_v_u32m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsuxseg3ei32_v_u32m1(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
+ return vsuxseg3ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vsuxseg4ei32_v_u32m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
+ return vsuxseg4ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsuxseg5ei32_v_u32m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
+ return vsuxseg5ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsuxseg6ei32_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
+ return vsuxseg6ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsuxseg7ei32_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
+ return vsuxseg7ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsuxseg8ei32_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
+ return vsuxseg8ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsuxseg2ei32_v_u32m2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
+ return vsuxseg2ei32_v_u32m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32m2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vsuxseg3ei32_v_u32m2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
+ return vsuxseg3ei32_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32m2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vsuxseg4ei32_v_u32m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
+ return vsuxseg4ei32_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m4(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return vsuxseg2ei32_v_u32m4(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
+ return vsuxseg2ei32_v_u32m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsuxseg2ei64_v_u32mf2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
+ return vsuxseg2ei64_v_u32mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsuxseg3ei64_v_u32mf2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
+ return vsuxseg3ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsuxseg4ei64_v_u32mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
+ return vsuxseg4ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsuxseg5ei64_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
+ return vsuxseg5ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsuxseg6ei64_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
+ return vsuxseg6ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsuxseg7ei64_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
+ return vsuxseg7ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsuxseg8ei64_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
+ return vsuxseg8ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsuxseg2ei64_v_u32m1(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
+ return vsuxseg2ei64_v_u32m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsuxseg3ei64_v_u32m1(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
+ return vsuxseg3ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vsuxseg4ei64_v_u32m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
+ return vsuxseg4ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsuxseg5ei64_v_u32m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
+ return vsuxseg5ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsuxseg6ei64_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
+ return vsuxseg6ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsuxseg7ei64_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
+ return vsuxseg7ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsuxseg8ei64_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
+ return vsuxseg8ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsuxseg2ei64_v_u32m2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
+ return vsuxseg2ei64_v_u32m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32m2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vsuxseg3ei64_v_u32m2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
+ return vsuxseg3ei64_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32m2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vsuxseg4ei64_v_u32m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
+ return vsuxseg4ei64_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m4(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return vsuxseg2ei64_v_u32m4(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
+ return vsuxseg2ei64_v_u32m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m1(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return vsuxseg2ei8_v_u64m1(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
+ return vsuxseg2ei8_v_u64m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u64m1(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
- return vsuxseg3ei8_v_u64m1(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
+ return vsuxseg3ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u64m1(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return vsuxseg4ei8_v_u64m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
+ return vsuxseg4ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u64m1(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return vsuxseg5ei8_v_u64m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
+ return vsuxseg5ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u64m1(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return vsuxseg6ei8_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
+ return vsuxseg6ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u64m1(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return vsuxseg7ei8_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
+ return vsuxseg7ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u64m1(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return vsuxseg8ei8_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
+ return vsuxseg8ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return vsuxseg2ei8_v_u64m2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
+ return vsuxseg2ei8_v_u64m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u64m2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
- return vsuxseg3ei8_v_u64m2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
+ return vsuxseg3ei8_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u64m2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return vsuxseg4ei8_v_u64m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
+ return vsuxseg4ei8_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m4(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return vsuxseg2ei8_v_u64m4(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
+ return vsuxseg2ei8_v_u64m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m1(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return vsuxseg2ei16_v_u64m1(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
+ return vsuxseg2ei16_v_u64m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u64m1(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
- return vsuxseg3ei16_v_u64m1(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
+ return vsuxseg3ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u64m1(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return vsuxseg4ei16_v_u64m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
+ return vsuxseg4ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u64m1(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return vsuxseg5ei16_v_u64m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
+ return vsuxseg5ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u64m1(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return vsuxseg6ei16_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
+ return vsuxseg6ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u64m1(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return vsuxseg7ei16_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
+ return vsuxseg7ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u64m1(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return vsuxseg8ei16_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
+ return vsuxseg8ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return vsuxseg2ei16_v_u64m2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
+ return vsuxseg2ei16_v_u64m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u64m2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
- return vsuxseg3ei16_v_u64m2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
+ return vsuxseg3ei16_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u64m2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return vsuxseg4ei16_v_u64m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
+ return vsuxseg4ei16_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m4(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return vsuxseg2ei16_v_u64m4(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
+ return vsuxseg2ei16_v_u64m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m1(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return vsuxseg2ei32_v_u64m1(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
+ return vsuxseg2ei32_v_u64m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u64m1(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1,
vuint64m1_t v2, size_t vl) { + return vsuxseg3ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u64m1( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsuxseg4ei32_v_u64m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { + return vsuxseg4ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u64m1( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsuxseg5ei32_v_u64m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { + return vsuxseg5ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u64m1( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsuxseg6ei32_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { + return vsuxseg6ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u64m1( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsuxseg7ei32_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { + return vsuxseg7ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u64m1( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsuxseg8ei32_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { + return vsuxseg8ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m2( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsuxseg2ei32_v_u64m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { + return vsuxseg2ei32_v_u64m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u64m2( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - 
return vsuxseg3ei32_v_u64m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { + return vsuxseg3ei32_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u64m2( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsuxseg4ei32_v_u64m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { + return vsuxseg4ei32_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m4( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsuxseg2ei32_v_u64m4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { + return vsuxseg2ei32_v_u64m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u64m1( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsuxseg2ei64_v_u64m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { + return vsuxseg2ei64_v_u64m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u64m1( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, 
vuint64m1_t v2, size_t vl) { - return vsuxseg3ei64_v_u64m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { + return vsuxseg3ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u64m1( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsuxseg4ei64_v_u64m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { + return vsuxseg4ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u64m1( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsuxseg5ei64_v_u64m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { + return vsuxseg5ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u64m1( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsuxseg6ei64_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { + return vsuxseg6ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u64m1( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return vsuxseg7ei64_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
+ return vsuxseg7ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u64m1(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return vsuxseg8ei64_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
+ return vsuxseg8ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u64m2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return vsuxseg2ei64_v_u64m2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
+ return vsuxseg2ei64_v_u64m2_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u64m2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
- return vsuxseg3ei64_v_u64m2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
+ return vsuxseg3ei64_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u64m2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return vsuxseg4ei64_v_u64m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
+ return vsuxseg4ei64_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u64m4(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return vsuxseg2ei64_v_u64m4(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
+ return vsuxseg2ei64_v_u64m4_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vsuxseg2ei8_v_f32mf2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
+ return vsuxseg2ei8_v_f32mf2_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vsuxseg3ei8_v_f32mf2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
+ return vsuxseg3ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vsuxseg4ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
+ return vsuxseg4ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vsuxseg5ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
+ return vsuxseg5ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg6ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vsuxseg6ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
+ return vsuxseg6ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsuxseg7ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
+ return vsuxseg7ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsuxseg8ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
+ return vsuxseg8ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsuxseg2ei8_v_f32m1(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
+ return vsuxseg2ei8_v_f32m1_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsuxseg3ei8_v_f32m1(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
+ return vsuxseg3ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsuxseg4ei8_v_f32m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
+ return vsuxseg4ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsuxseg5ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
+ return vsuxseg5ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg6ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsuxseg6ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
+ return vsuxseg6ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsuxseg7ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
+ return vsuxseg7ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vsuxseg8ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
+ return vsuxseg8ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei8_v_f32m2 (float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vsuxseg2ei8_v_f32m2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
+ return vsuxseg2ei8_v_f32m2_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32m2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei8_v_f32m2 (float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vsuxseg3ei8_v_f32m2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
+ return vsuxseg3ei8_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32m2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei8_v_f32m2 (float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vsuxseg4ei8_v_f32m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
+ return vsuxseg4ei8_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m4(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei8_v_f32m4 (float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vsuxseg2ei8_v_f32m4(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_f32m4_m (vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
+ return vsuxseg2ei8_v_f32m4_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vsuxseg2ei16_v_f32mf2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
+ return vsuxseg2ei16_v_f32mf2_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vsuxseg3ei16_v_f32mf2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
+ return vsuxseg3ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vsuxseg4ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
+ return vsuxseg4ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vsuxseg5ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
+ return vsuxseg5ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg6ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vsuxseg6ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
+ return vsuxseg6ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsuxseg7ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
+ return vsuxseg7ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsuxseg8ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
+ return vsuxseg8ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsuxseg2ei16_v_f32m1(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
+ return vsuxseg2ei16_v_f32m1_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsuxseg3ei16_v_f32m1(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
+ return vsuxseg3ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsuxseg4ei16_v_f32m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
+ return vsuxseg4ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsuxseg5ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsuxseg6ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsuxseg7ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vsuxseg8ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_f32m2 (float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vsuxseg2ei16_v_f32m2(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_f32m2 (float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vsuxseg3ei16_v_f32m2(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_f32m2 (float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vsuxseg4ei16_v_f32m2(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_f32m4 (float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vsuxseg2ei16_v_f32m4(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vsuxseg2ei32_v_f32mf2(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vsuxseg3ei32_v_f32mf2(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vsuxseg4ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vsuxseg5ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vsuxseg6ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsuxseg7ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsuxseg8ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t
v1, size_t vl) { - return vsuxseg2ei32_v_f32m1(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vsuxseg3ei32_v_f32m1(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsuxseg4ei32_v_f32m1(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsuxseg5ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsuxseg6ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsuxseg7ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsuxseg8ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i32.i64( 
[[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f32m2 (float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vsuxseg2ei32_v_f32m2(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_f32m2 (float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return vsuxseg3ei32_v_f32m2(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_f32m2 (float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsuxseg4ei32_v_f32m2(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f32m4 (float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsuxseg2ei32_v_f32m4(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsuxseg2ei64_v_f32mf2(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsuxseg3ei64_v_f32mf2(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsuxseg4ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, 
vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsuxseg5ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsuxseg6ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsuxseg7ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsuxseg8ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsuxseg2ei64_v_f32m1(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vsuxseg3ei64_v_f32m1(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsuxseg4ei64_v_f32m1(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsuxseg5ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsuxseg6ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsuxseg7ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsuxseg8ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_f32m2 (float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vsuxseg2ei64_v_f32m2(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_f32m2 (float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return vsuxseg3ei64_v_f32m2(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_f32m2 (float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsuxseg4ei64_v_f32m2(base, bindex, v0, 
v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_f32m4 (float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsuxseg2ei64_v_f32m4(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsuxseg2ei8_v_f64m1(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsuxseg3ei8_v_f64m1(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsuxseg4ei8_v_f64m1(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsuxseg5ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsuxseg6ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsuxseg7ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, 
vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsuxseg8ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_f64m2 (double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsuxseg2ei8_v_f64m2(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_f64m2 (double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsuxseg3ei8_v_f64m2(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_f64m2 (double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsuxseg4ei8_v_f64m2(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_f64m4 (double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsuxseg2ei8_v_f64m4(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsuxseg2ei16_v_f64m1(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsuxseg3ei16_v_f64m1(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i16.i64( 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsuxseg4ei16_v_f64m1(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsuxseg5ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsuxseg6ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsuxseg7ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsuxseg8ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_f64m2 (double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsuxseg2ei16_v_f64m2(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_f64m2 (double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) 
{ - return vsuxseg3ei16_v_f64m2(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_f64m2 (double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsuxseg4ei16_v_f64m2(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_f64m4 (double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsuxseg2ei16_v_f64m4(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsuxseg2ei32_v_f64m1(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsuxseg3ei32_v_f64m1(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsuxseg4ei32_v_f64m1(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsuxseg5ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsuxseg6ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// 
CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsuxseg7ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsuxseg8ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f64m2 (double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsuxseg2ei32_v_f64m2(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_f64m2 (double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsuxseg3ei32_v_f64m2(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_f64m2 (double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsuxseg4ei32_v_f64m2(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f64m4 (double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsuxseg2ei32_v_f64m4(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsuxseg2ei64_v_f64m1(base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: 
@test_vsuxseg3ei64_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsuxseg3ei64_v_f64m1(base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsuxseg4ei64_v_f64m1(base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsuxseg5ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsuxseg6ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsuxseg7ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsuxseg8ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: 
ret void
-//
-void test_vsuxseg2ei64_v_f64m2 (double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
-  return vsuxseg2ei64_v_f64m2(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[V0:%.*]], <vscale x 2 x double> [[V1:%.*]], <vscale x 2 x double> [[V2:%.*]], double* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg3ei64_v_f64m2 (double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
-  return vsuxseg3ei64_v_f64m2(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[V0:%.*]], <vscale x 2 x double> [[V1:%.*]], <vscale x 2 x double> [[V2:%.*]], <vscale x 2 x double> [[V3:%.*]], double* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg4ei64_v_f64m2 (double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
-  return vsuxseg4ei64_v_f64m2(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[V0:%.*]], <vscale x 4 x double> [[V1:%.*]], double* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei64_v_f64m4 (double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
-  return vsuxseg2ei64_v_f64m4(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
-  return vsuxseg2ei8_v_i8mf8_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg3ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
-  return vsuxseg3ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg4ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
-  return vsuxseg4ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], <vscale x 1 x i8> [[V4:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg5ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t
v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsuxseg5ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vsuxseg6ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsuxseg7ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsuxseg8ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vsuxseg2ei8_v_i8mf4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsuxseg3ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vsuxseg4ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: 
@test_vsuxseg5ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsuxseg5ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsuxseg6ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsuxseg7ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsuxseg8ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsuxseg2ei8_v_i8mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vsuxseg3ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vsuxseg4ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vsuxseg5ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsuxseg6ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vsuxseg7ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsuxseg8ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vsuxseg2ei8_v_i8m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vsuxseg3ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vsuxseg4ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsuxseg5ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsuxseg6ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsuxseg7ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vsuxseg8ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_i8m2_m (vbool4_t mask, int8_t 
*base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return vsuxseg2ei8_v_i8m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return vsuxseg3ei8_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return vsuxseg4ei8_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_i8m4_m (vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { - return vsuxseg2ei8_v_i8m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsuxseg2ei16_v_i8mf8_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsuxseg3ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsuxseg4ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t 
- return vsuxseg5ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vsuxseg6ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vsuxseg7ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vsuxseg8ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vsuxseg2ei16_v_i8mf4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vsuxseg3ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vsuxseg4ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vsuxseg5ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vsuxseg6ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vsuxseg7ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vsuxseg8ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return vsuxseg2ei16_v_i8mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vsuxseg3ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vsuxseg4ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return vsuxseg5ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return vsuxseg6ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return vsuxseg7ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return vsuxseg8ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return vsuxseg2ei16_v_i8m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return vsuxseg3ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return vsuxseg4ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return vsuxseg5ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return vsuxseg6ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
- return vsuxseg7ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return vsuxseg8ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
- return vsuxseg2ei16_v_i8m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
- return vsuxseg3ei16_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
- return vsuxseg4ei16_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i8m4_m (vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) {
- return vsuxseg2ei16_v_i8m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return vsuxseg2ei32_v_i8mf8_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return vsuxseg3ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return vsuxseg4ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return vsuxseg5ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vsuxseg6ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vsuxseg7ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vsuxseg8ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vsuxseg2ei32_v_i8mf4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vsuxseg3ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vsuxseg4ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vsuxseg5ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vsuxseg6ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vsuxseg7ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vsuxseg8ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return vsuxseg2ei32_v_i8mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vsuxseg3ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vsuxseg4ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return vsuxseg5ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return vsuxseg6ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return vsuxseg7ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return vsuxseg8ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return vsuxseg2ei32_v_i8m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return vsuxseg3ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return vsuxseg4ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return vsuxseg5ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return vsuxseg6ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
- return vsuxseg7ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return vsuxseg8ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
- return vsuxseg2ei32_v_i8m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
- return vsuxseg3ei32_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
- return vsuxseg4ei32_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return vsuxseg2ei64_v_i8mf8_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return vsuxseg3ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return vsuxseg4ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return vsuxseg5ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return vsuxseg6ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return vsuxseg7ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return vsuxseg8ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return vsuxseg2ei64_v_i8mf4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return vsuxseg3ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return vsuxseg4ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return vsuxseg5ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return vsuxseg6ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return vsuxseg7ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return vsuxseg8ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return vsuxseg2ei64_v_i8mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return vsuxseg3ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return vsuxseg4ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return vsuxseg5ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return vsuxseg6ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return vsuxseg7ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return vsuxseg8ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return vsuxseg2ei64_v_i8m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return vsuxseg3ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return vsuxseg4ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return vsuxseg5ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return vsuxseg6ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
- return vsuxseg7ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return vsuxseg8ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return vsuxseg2ei8_v_i16mf4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
- return vsuxseg3ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return vsuxseg4ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
- return vsuxseg5ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
- return vsuxseg6ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
- return vsuxseg7ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
- return vsuxseg8ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return vsuxseg2ei8_v_i16mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
- return vsuxseg3ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return vsuxseg4ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
- return vsuxseg5ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
- return vsuxseg6ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
- return vsuxseg7ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
- return vsuxseg8ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return vsuxseg2ei8_v_i16m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
- return vsuxseg3ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
- return vsuxseg4ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
- return vsuxseg5ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return vsuxseg6ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsuxseg7ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsuxseg8ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsuxseg2ei8_v_i16m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsuxseg3ei8_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsuxseg4ei8_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_i16m4_m (vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return vsuxseg2ei8_v_i16m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsuxseg2ei16_v_i16mf4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: 
@test_vsuxseg3ei16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsuxseg3ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsuxseg4ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsuxseg5ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsuxseg6ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsuxseg7ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsuxseg8ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, 
v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsuxseg2ei16_v_i16mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsuxseg3ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsuxseg4ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsuxseg5ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsuxseg6ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsuxseg7ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsuxseg8ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsuxseg2ei16_v_i16m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsuxseg3ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsuxseg4ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsuxseg5ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsuxseg6ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsuxseg7ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsuxseg8ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsuxseg2ei16_v_i16m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsuxseg3ei16_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsuxseg4ei16_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_i16m4_m (vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return vsuxseg2ei16_v_i16m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsuxseg2ei32_v_i16mf4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: 
@test_vsuxseg3ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsuxseg3ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsuxseg4ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsuxseg5ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsuxseg6ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsuxseg7ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsuxseg8ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, 
v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsuxseg2ei32_v_i16mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsuxseg3ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsuxseg4ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsuxseg5ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsuxseg6ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsuxseg7ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsuxseg8ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsuxseg2ei32_v_i16m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsuxseg3ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsuxseg4ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsuxseg5ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsuxseg6ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsuxseg7ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsuxseg8ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsuxseg2ei32_v_i16m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsuxseg3ei32_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsuxseg4ei32_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i16m4_m (vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return vsuxseg2ei32_v_i16m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsuxseg2ei64_v_i16mf4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: 
@test_vsuxseg3ei64_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsuxseg3ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsuxseg4ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsuxseg5ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsuxseg6ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsuxseg7ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsuxseg8ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); 
-} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsuxseg2ei64_v_i16mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsuxseg3ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsuxseg4ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsuxseg5ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsuxseg6ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsuxseg7ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i64.i64( 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsuxseg8ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsuxseg2ei64_v_i16m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsuxseg3ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsuxseg4ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsuxseg5ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsuxseg6ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// 
CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsuxseg7ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsuxseg8ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsuxseg2ei64_v_i16m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsuxseg3ei64_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsuxseg4ei64_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsuxseg2ei8_v_i32mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsuxseg3ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: 
@test_vsuxseg4ei8_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsuxseg4ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsuxseg5ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsuxseg6ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsuxseg7ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsuxseg8ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsuxseg2ei8_v_i32m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32m1_m( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsuxseg3ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsuxseg4ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsuxseg5ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsuxseg6ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsuxseg7ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsuxseg8ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsuxseg2ei8_v_i32m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsuxseg3ei8_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsuxseg4ei8_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsuxseg2ei8_v_i32m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsuxseg2ei16_v_i32mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsuxseg3ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsuxseg4ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsuxseg5ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsuxseg6ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsuxseg7ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsuxseg8ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsuxseg2ei16_v_i32m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsuxseg3ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return vsuxseg4ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return vsuxseg5ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return vsuxseg6ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
- return vsuxseg7ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return vsuxseg8ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return vsuxseg2ei16_v_i32m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return vsuxseg3ei16_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return vsuxseg4ei16_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return vsuxseg2ei16_v_i32m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return vsuxseg2ei32_v_i32mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return vsuxseg3ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return vsuxseg4ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return vsuxseg5ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return vsuxseg6ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
- return vsuxseg7ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return vsuxseg8ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return vsuxseg2ei32_v_i32m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return vsuxseg3ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return vsuxseg4ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return vsuxseg5ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return vsuxseg6ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
- return vsuxseg7ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return vsuxseg8ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return vsuxseg2ei32_v_i32m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return vsuxseg3ei32_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return vsuxseg4ei32_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return vsuxseg2ei32_v_i32m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return vsuxseg2ei64_v_i32mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return vsuxseg3ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return vsuxseg4ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return vsuxseg5ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return vsuxseg6ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
- return vsuxseg7ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return vsuxseg8ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return vsuxseg2ei64_v_i32m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return vsuxseg3ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return vsuxseg4ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return vsuxseg5ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return vsuxseg6ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
- return vsuxseg7ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return vsuxseg8ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return vsuxseg2ei64_v_i32m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return vsuxseg3ei64_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return vsuxseg4ei64_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return vsuxseg2ei64_v_i32m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vsuxseg2ei8_v_i64m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return vsuxseg3ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return vsuxseg4ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return vsuxseg5ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return vsuxseg6ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return vsuxseg7ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vsuxseg8ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vsuxseg2ei8_v_i64m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vsuxseg3ei8_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vsuxseg4ei8_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vsuxseg2ei8_v_i64m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vsuxseg2ei16_v_i64m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return vsuxseg3ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return vsuxseg4ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return vsuxseg5ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return vsuxseg6ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return vsuxseg7ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vsuxseg8ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vsuxseg2ei16_v_i64m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vsuxseg3ei16_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vsuxseg4ei16_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vsuxseg2ei16_v_i64m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vsuxseg2ei32_v_i64m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return vsuxseg3ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return vsuxseg4ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return vsuxseg5ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return vsuxseg6ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return vsuxseg7ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vsuxseg8ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vsuxseg2ei32_v_i64m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vsuxseg3ei32_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vsuxseg4ei32_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vsuxseg2ei32_v_i64m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return vsuxseg2ei64_v_i64m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return vsuxseg3ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return vsuxseg4ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return vsuxseg5ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return vsuxseg6ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return vsuxseg7ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return vsuxseg8ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return vsuxseg2ei64_v_i64m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return vsuxseg3ei64_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return vsuxseg4ei64_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return vsuxseg2ei64_v_i64m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return vsuxseg2ei8_v_u8mf8_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
- return vsuxseg3ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return vsuxseg4ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
- return vsuxseg5ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return vsuxseg6ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
- return vsuxseg7ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return vsuxseg8ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return vsuxseg2ei8_v_u8mf4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return vsuxseg3ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return vsuxseg4ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
- return vsuxseg5ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return vsuxseg6ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
- return vsuxseg7ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return vsuxseg8ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return vsuxseg2ei8_v_u8mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return vsuxseg3ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
- return vsuxseg4ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
- return vsuxseg5ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return vsuxseg6ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsuxseg7ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsuxseg8ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsuxseg2ei8_v_u8m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsuxseg3ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsuxseg4ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsuxseg5ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsuxseg6ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// 
CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsuxseg7ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsuxseg8ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return vsuxseg2ei8_v_u8m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return vsuxseg3ei8_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return vsuxseg4ei8_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { - return vsuxseg2ei8_v_u8m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void 
test_vsuxseg2ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsuxseg2ei16_v_u8mf8_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsuxseg3ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsuxseg4ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsuxseg5ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsuxseg6ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsuxseg7ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, 
vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsuxseg8ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsuxseg2ei16_v_u8mf4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsuxseg3ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsuxseg4ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsuxseg5ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsuxseg6ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsuxseg7ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, 
v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsuxseg8ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsuxseg2ei16_v_u8mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vsuxseg3ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsuxseg4ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsuxseg5ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsuxseg6ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsuxseg7ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsuxseg8ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsuxseg2ei16_v_u8m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsuxseg3ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsuxseg4ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsuxseg5ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsuxseg6ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsuxseg7ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsuxseg8ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return vsuxseg2ei16_v_u8m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return vsuxseg3ei16_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return vsuxseg4ei16_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { - 
return vsuxseg2ei16_v_u8m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsuxseg2ei32_v_u8mf8_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsuxseg3ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsuxseg4ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsuxseg5ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsuxseg6ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsuxseg7ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsuxseg8ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsuxseg2ei32_v_u8mf4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsuxseg3ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsuxseg4ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsuxseg5ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsuxseg6ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsuxseg7ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsuxseg8ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsuxseg2ei32_v_u8mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vsuxseg3ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsuxseg4ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsuxseg5ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_u8mf2_m (vbool16_t mask, uint8_t 
*base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsuxseg6ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsuxseg7ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsuxseg8ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsuxseg2ei32_v_u8m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsuxseg3ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsuxseg4ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return 
vsuxseg5ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsuxseg6ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsuxseg7ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsuxseg8ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return vsuxseg2ei32_v_u8m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return vsuxseg3ei32_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return vsuxseg4ei32_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsuxseg2ei64_v_u8mf8_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsuxseg3ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsuxseg4ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsuxseg5ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsuxseg6ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsuxseg7ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsuxseg8ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsuxseg2ei64_v_u8mf4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsuxseg3ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsuxseg4ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsuxseg5ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsuxseg6ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t 
bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsuxseg7ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsuxseg8ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsuxseg2ei64_v_u8mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vsuxseg3ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsuxseg4ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsuxseg5ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return 
vsuxseg6ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
- return vsuxseg7ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
- return vsuxseg8ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return vsuxseg2ei64_v_u8m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
- return vsuxseg3ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
- return vsuxseg4ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
- return vsuxseg5ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
- return vsuxseg6ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
- return vsuxseg7ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
- return vsuxseg8ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsuxseg2ei8_v_u16mf4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsuxseg3ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsuxseg4ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsuxseg5ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsuxseg6ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsuxseg7ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsuxseg8ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsuxseg2ei8_v_u16mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsuxseg3ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsuxseg4ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsuxseg5ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsuxseg6ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsuxseg7ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsuxseg8ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsuxseg2ei8_v_u16m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsuxseg3ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsuxseg4ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsuxseg5ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsuxseg6ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsuxseg7ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsuxseg8ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsuxseg2ei8_v_u16m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsuxseg3ei8_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsuxseg4ei8_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return vsuxseg2ei8_v_u16m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsuxseg2ei16_v_u16mf4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsuxseg3ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsuxseg4ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsuxseg5ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsuxseg6ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsuxseg7ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsuxseg8ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsuxseg2ei16_v_u16mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsuxseg3ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsuxseg4ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsuxseg5ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsuxseg6ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsuxseg7ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsuxseg8ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsuxseg2ei16_v_u16m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsuxseg3ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsuxseg4ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsuxseg5ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsuxseg6ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsuxseg7ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsuxseg8ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsuxseg2ei16_v_u16m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsuxseg3ei16_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsuxseg4ei16_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return vsuxseg2ei16_v_u16m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsuxseg2ei32_v_u16mf4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsuxseg3ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsuxseg4ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsuxseg5ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsuxseg6ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsuxseg7ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsuxseg8ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsuxseg2ei32_v_u16mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsuxseg3ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsuxseg4ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsuxseg5ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsuxseg6ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsuxseg7ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsuxseg8ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsuxseg2ei32_v_u16m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsuxseg3ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsuxseg4ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsuxseg5ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsuxseg6ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsuxseg7ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsuxseg8ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsuxseg2ei32_v_u16m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsuxseg3ei32_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsuxseg4ei32_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return vsuxseg2ei32_v_u16m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsuxseg2ei64_v_u16mf4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsuxseg3ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsuxseg4ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsuxseg5ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsuxseg6ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsuxseg7ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsuxseg8ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsuxseg2ei64_v_u16mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsuxseg3ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsuxseg4ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsuxseg5ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsuxseg6ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsuxseg7ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsuxseg8ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsuxseg2ei64_v_u16m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsuxseg3ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsuxseg4ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsuxseg5ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsuxseg6ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsuxseg7ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsuxseg8ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsuxseg2ei64_v_u16m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsuxseg3ei64_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsuxseg4ei64_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsuxseg2ei8_v_u32mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsuxseg3ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsuxseg4ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsuxseg5ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsuxseg6ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsuxseg7ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsuxseg8ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsuxseg2ei8_v_u32m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsuxseg3ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vsuxseg4ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsuxseg5ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsuxseg6ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsuxseg7ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsuxseg8ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsuxseg2ei8_v_u32m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0,
vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsuxseg3ei8_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsuxseg4ei8_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsuxseg2ei8_v_u32m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsuxseg2ei16_v_u32mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsuxseg3ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsuxseg4ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsuxseg5ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsuxseg6ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsuxseg7ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsuxseg8ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsuxseg2ei16_v_u32m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsuxseg3ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsuxseg4ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// 
-//
-void test_vsuxseg5ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsuxseg5ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsuxseg6ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsuxseg7ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsuxseg8ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsuxseg2ei16_v_u32m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vsuxseg3ei16_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vsuxseg4ei16_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return vsuxseg2ei16_v_u32m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsuxseg2ei32_v_u32mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsuxseg3ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsuxseg4ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsuxseg5ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsuxseg6ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsuxseg7ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsuxseg8ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsuxseg2ei32_v_u32m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsuxseg3ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vsuxseg4ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsuxseg5ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsuxseg6ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsuxseg7ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsuxseg8ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsuxseg2ei32_v_u32m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vsuxseg3ei32_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vsuxseg4ei32_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return vsuxseg2ei32_v_u32m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsuxseg2ei64_v_u32mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsuxseg3ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsuxseg4ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsuxseg5ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsuxseg6ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsuxseg7ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsuxseg8ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsuxseg2ei64_v_u32m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsuxseg3ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vsuxseg4ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsuxseg5ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsuxseg6ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsuxseg7ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsuxseg8ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsuxseg2ei64_v_u32m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vsuxseg3ei64_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vsuxseg4ei64_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return vsuxseg2ei64_v_u32m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return vsuxseg2ei8_v_u64m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
- return vsuxseg3ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return vsuxseg4ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return vsuxseg5ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return vsuxseg6ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return vsuxseg7ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return vsuxseg8ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return vsuxseg2ei8_v_u64m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
- return vsuxseg3ei8_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return vsuxseg4ei8_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return vsuxseg2ei8_v_u64m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return vsuxseg2ei16_v_u64m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
- return vsuxseg3ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return vsuxseg4ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return vsuxseg5ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return vsuxseg6ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return vsuxseg7ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return vsuxseg8ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return vsuxseg2ei16_v_u64m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
- return vsuxseg3ei16_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return vsuxseg4ei16_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return vsuxseg2ei16_v_u64m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return vsuxseg2ei32_v_u64m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
- return vsuxseg3ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return vsuxseg4ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return vsuxseg5ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return vsuxseg6ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return vsuxseg7ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return vsuxseg8ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return vsuxseg2ei32_v_u64m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
- return vsuxseg3ei32_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return vsuxseg4ei32_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return vsuxseg2ei32_v_u64m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return vsuxseg2ei64_v_u64m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
- return vsuxseg3ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return vsuxseg4ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return vsuxseg5ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return vsuxseg6ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return vsuxseg7ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return vsuxseg8ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return vsuxseg2ei64_v_u64m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
- return vsuxseg3ei64_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return vsuxseg4ei64_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return vsuxseg2ei64_v_u64m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vsuxseg2ei8_v_f32mf2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vsuxseg3ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vsuxseg4ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vsuxseg5ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vsuxseg6ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsuxseg7ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg8ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsuxseg8ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsuxseg2ei8_v_f32m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg3ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsuxseg3ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg4ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsuxseg4ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg5ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsuxseg5ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg6ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsuxseg6ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg7ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsuxseg7ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-//
CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsuxseg8ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vsuxseg2ei8_v_f32m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return vsuxseg3ei8_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsuxseg4ei8_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_f32m4_m (vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsuxseg2ei8_v_f32m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsuxseg2ei16_v_f32mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsuxseg3ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i16.i64( 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsuxseg4ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsuxseg5ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsuxseg6ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsuxseg7ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsuxseg8ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsuxseg2ei16_v_f32m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vsuxseg3ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsuxseg4ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsuxseg5ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsuxseg6ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsuxseg7ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsuxseg8ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: 
@test_vsuxseg2ei16_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vsuxseg2ei16_v_f32m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return vsuxseg3ei16_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsuxseg4ei16_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_f32m4_m (vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsuxseg2ei16_v_f32m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsuxseg2ei32_v_f32mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsuxseg3ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsuxseg4ei32_v_f32mf2_m(mask, base, 
bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsuxseg5ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsuxseg6ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsuxseg7ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsuxseg8ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsuxseg2ei32_v_f32m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return 
vsuxseg3ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsuxseg4ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsuxseg5ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsuxseg6ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsuxseg7ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsuxseg8ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t 
vl) { - return vsuxseg2ei32_v_f32m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return vsuxseg3ei32_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsuxseg4ei32_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f32m4_m (vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsuxseg2ei32_v_f32m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsuxseg2ei64_v_f32mf2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsuxseg3ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsuxseg4ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_f32mf2_m 
(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsuxseg5ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsuxseg6ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsuxseg7ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsuxseg8ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsuxseg2ei64_v_f32m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vsuxseg3ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_f32m1_m 
(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsuxseg4ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsuxseg5ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsuxseg6ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsuxseg7ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsuxseg8ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vsuxseg2ei64_v_f32m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei64_v_f32m2_m 
(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return vsuxseg3ei64_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsuxseg4ei64_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_f32m4_m (vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsuxseg2ei64_v_f32m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsuxseg2ei8_v_f64m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsuxseg3ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsuxseg4ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsuxseg5ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], 
[[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsuxseg6ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsuxseg7ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsuxseg8ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsuxseg2ei8_v_f64m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsuxseg3ei8_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsuxseg4ei8_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret 
void -// -void test_vsuxseg2ei8_v_f64m4_m (vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsuxseg2ei8_v_f64m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsuxseg2ei16_v_f64m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsuxseg3ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsuxseg4ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsuxseg5ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsuxseg6ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return 
vsuxseg7ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsuxseg8ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsuxseg2ei16_v_f64m2_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg3ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsuxseg3ei16_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsuxseg4ei16_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_f64m4_m (vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsuxseg2ei16_v_f64m4_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsuxseg2ei32_v_f64m1_m(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg3ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
-  return vsuxseg3ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg4ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
-  return vsuxseg4ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg5ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
-  return vsuxseg5ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg6ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
-  return vsuxseg6ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg7ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
-  return vsuxseg7ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg8ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
-  return vsuxseg8ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
-  return vsuxseg2ei32_v_f64m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg3ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
-  return vsuxseg3ei32_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg4ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
-  return vsuxseg4ei32_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei32_v_f64m4_m (vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
-  return vsuxseg2ei32_v_f64m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
-  return vsuxseg2ei64_v_f64m1_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg3ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
-  return vsuxseg3ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg4ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
-  return vsuxseg4ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg5ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
-  return vsuxseg5ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg6ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
-  return vsuxseg6ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg7ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
-  return vsuxseg7ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg8ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
-  return vsuxseg8ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
-  return vsuxseg2ei64_v_f64m2_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg3ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
-  return vsuxseg3ei64_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg4ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
-  return vsuxseg4ei64_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei64_v_f64m4_m (vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
-  return vsuxseg2ei64_v_f64m4_m(mask, base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
-  return vsuxseg2ei8_v_f16mf4(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg3ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
-  return vsuxseg3ei8_v_f16mf4(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg4ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
-  return vsuxseg4ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg5ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
-  return vsuxseg5ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg6ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
-  return vsuxseg6ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg7ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
-  return vsuxseg7ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg8ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
-  return vsuxseg8ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
-  return vsuxseg2ei8_v_f16mf2(base, bindex, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg3ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
-  return vsuxseg3ei8_v_f16mf2(base, bindex, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg4ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
-  return vsuxseg4ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg5ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
-  return vsuxseg5ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
-  return vsuxseg6ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg5ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
+  return vsuxseg5ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
-  return vsuxseg7ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg6ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
+  return vsuxseg6ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg8ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
-  return vsuxseg8ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg7ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
+  return vsuxseg7ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
-  return vsuxseg2ei8_v_f16m1(base, bindex, v0, v1, vl);
+void test_vsuxseg8ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
+  return vsuxseg8ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
-  return vsuxseg3ei8_v_f16m1(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
+  return vsuxseg2ei16_v_f32m2_m(mask, base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
-  return vsuxseg4ei8_v_f16m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg3ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
+  return vsuxseg3ei16_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg5ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
-  return vsuxseg5ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg4ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
+  return vsuxseg4ei16_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
-  return vsuxseg6ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg2ei16_v_f32m4_m (vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
+  return vsuxseg2ei16_v_f32m4_m(mask, base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
-  return vsuxseg7ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg2ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
+  return vsuxseg2ei32_v_f32mf2_m(mask, base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg8ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
-  return vsuxseg8ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg3ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
+  return vsuxseg3ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
-  return vsuxseg2ei8_v_f16m2(base, bindex, v0, v1, vl);
+void test_vsuxseg4ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
+  return vsuxseg4ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16m2(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
-  return vsuxseg3ei8_v_f16m2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg5ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
+  return vsuxseg5ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16m2(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
-  return vsuxseg4ei8_v_f16m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg6ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
+  return vsuxseg6ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m4(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei8_v_f16m4 (_Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
-  return vsuxseg2ei8_v_f16m4(base, bindex, v0, v1, vl);
+void test_vsuxseg7ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
+  return vsuxseg7ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
-  return vsuxseg2ei16_v_f16mf4(base, bindex, v0, v1, vl);
+void test_vsuxseg8ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
+  return vsuxseg8ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
-  return vsuxseg3ei16_v_f16mf4(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
+  return vsuxseg2ei32_v_f32m1_m(mask, base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
-  return vsuxseg4ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg3ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
+  return vsuxseg3ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg5ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
-  return vsuxseg5ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg4ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
+  return vsuxseg4ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
-  return vsuxseg6ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg5ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
+  return vsuxseg5ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
-  return vsuxseg7ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg6ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
+  return vsuxseg6ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg8ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
-  return vsuxseg8ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg7ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
+  return vsuxseg7ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
-  return vsuxseg2ei16_v_f16mf2(base, bindex, v0, v1, vl);
+void test_vsuxseg8ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
+  return vsuxseg8ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
-  return vsuxseg3ei16_v_f16mf2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
+  return vsuxseg2ei32_v_f32m2_m(mask, base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
-  return vsuxseg4ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg3ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
+  return vsuxseg3ei32_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg5ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
-  return vsuxseg5ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg4ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
+  return vsuxseg4ei32_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
-  return vsuxseg6ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg2ei32_v_f32m4_m (vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
+  return vsuxseg2ei32_v_f32m4_m(mask, base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
-  return vsuxseg7ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg2ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
+  return vsuxseg2ei64_v_f32mf2_m(mask, base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg8ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
-  return vsuxseg8ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg3ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
+  return vsuxseg3ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
-  return vsuxseg2ei16_v_f16m1(base, bindex, v0, v1, vl);
+void test_vsuxseg4ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
+  return vsuxseg4ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
-  return vsuxseg3ei16_v_f16m1(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg5ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
+  return vsuxseg5ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
-  return vsuxseg4ei16_v_f16m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg6ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
+  return vsuxseg6ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
 
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg5ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
-  return vsuxseg5ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg7ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
+  return vsuxseg7ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
-  return vsuxseg6ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg8ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
+  return vsuxseg8ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
-  return vsuxseg7ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg2ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
+  return vsuxseg2ei64_v_f32m1_m(mask, base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg8ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
-  return vsuxseg8ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg3ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
+  return vsuxseg3ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
-  return vsuxseg2ei16_v_f16m2(base, bindex, v0, v1, vl);
+void test_vsuxseg4ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
+  return vsuxseg4ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16m2(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
-  return vsuxseg3ei16_v_f16m2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg5ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
+  return vsuxseg5ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16m2(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
-  return vsuxseg4ei16_v_f16m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg6ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
+  return vsuxseg6ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m4(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei16_v_f16m4 (_Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
-  return vsuxseg2ei16_v_f16m4(base, bindex, v0, v1, vl);
+void test_vsuxseg7ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
+  return vsuxseg7ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
-  return vsuxseg2ei32_v_f16mf4(base, bindex, v0, v1, vl);
+void test_vsuxseg8ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
+  return vsuxseg8ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
-  return vsuxseg3ei32_v_f16mf4(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
+  return vsuxseg2ei64_v_f32m2_m(mask, base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
-  return vsuxseg4ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg3ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
+  return vsuxseg3ei64_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg5ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
-  return vsuxseg5ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg4ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
+  return vsuxseg4ei64_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
-  return vsuxseg6ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg2ei64_v_f32m4_m (vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
+  return vsuxseg2ei64_v_f32m4_m(mask, base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
-  return vsuxseg7ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg2ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
+  return vsuxseg2ei8_v_f64m1_m(mask, base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg8ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
-  return vsuxseg8ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg3ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
+  return vsuxseg3ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
-  return vsuxseg2ei32_v_f16mf2(base, bindex, v0, v1, vl);
+void test_vsuxseg4ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
+  return vsuxseg4ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
-  return vsuxseg3ei32_v_f16mf2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg5ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
+  return vsuxseg5ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
-  return vsuxseg4ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg6ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
+  return vsuxseg6ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg5ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
-  return vsuxseg5ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg7ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
+  return vsuxseg7ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
-  return vsuxseg6ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg8ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
+  return vsuxseg8ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
-  return vsuxseg7ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg2ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
+  return vsuxseg2ei8_v_f64m2_m(mask, base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg8ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
-  return vsuxseg8ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg3ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
+  return vsuxseg3ei8_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
-  return vsuxseg2ei32_v_f16m1(base, bindex, v0, v1, vl);
+void test_vsuxseg4ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
+  return vsuxseg4ei8_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
-  return vsuxseg3ei32_v_f16m1(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei8_v_f64m4_m (vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
+  return vsuxseg2ei8_v_f64m4_m(mask, base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
-  return vsuxseg4ei32_v_f16m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg2ei16_v_f64m1_m (vbool64_t mask,
double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { + return vsuxseg2ei16_v_f64m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16m1( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return vsuxseg5ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg3ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { + return vsuxseg3ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16m1( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return vsuxseg6ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg4ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { + return vsuxseg4ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16m1( +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return vsuxseg7ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg5ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { + return vsuxseg5ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16m1( +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], 
[[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return vsuxseg8ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg6ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { + return vsuxseg6ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m2( +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return vsuxseg2ei32_v_f16m2(base, bindex, v0, v1, vl); +void test_vsuxseg7ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { + return vsuxseg7ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16m2( +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return vsuxseg3ei32_v_f16m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg8ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { + return vsuxseg8ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16m2( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg4ei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return vsuxseg4ei32_v_f16m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg2ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { + return vsuxseg2ei16_v_f64m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m4( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f16m4 (_Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return vsuxseg2ei32_v_f16m4(base, bindex, v0, v1, vl); +void test_vsuxseg3ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { + return vsuxseg3ei16_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16mf4( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return vsuxseg2ei64_v_f16mf4(base, bindex, v0, v1, vl); +void test_vsuxseg4ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { + return vsuxseg4ei16_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16mf4( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return vsuxseg3ei64_v_f16mf4(base, bindex, v0, v1, v2, vl); +void test_vsuxseg2ei16_v_f64m4_m (vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { + return vsuxseg2ei16_v_f64m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16mf4( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return vsuxseg4ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg2ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { + return vsuxseg2ei32_v_f64m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16mf4( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return vsuxseg5ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg3ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { + return vsuxseg3ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16mf4( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return vsuxseg6ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg4ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { + return vsuxseg4ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16mf4( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return vsuxseg7ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg5ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, 
vfloat64m1_t v4, size_t vl) { + return vsuxseg5ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16mf4( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return vsuxseg8ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg6ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { + return vsuxseg6ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16mf2( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return vsuxseg2ei64_v_f16mf2(base, bindex, v0, v1, vl); +void test_vsuxseg7ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { + return vsuxseg7ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16mf2( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return vsuxseg3ei64_v_f16mf2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg8ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { + return vsuxseg8ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16mf2( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return vsuxseg4ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg2ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { + return vsuxseg2ei32_v_f64m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16mf2( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return vsuxseg5ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg3ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { + return vsuxseg3ei32_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16mf2( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return vsuxseg6ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg4ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { + return vsuxseg4ei32_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16mf2( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, 
vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return vsuxseg7ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg2ei32_v_f64m4_m (vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { + return vsuxseg2ei32_v_f64m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16mf2( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return vsuxseg8ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg2ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { + return vsuxseg2ei64_v_f64m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16m1( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return vsuxseg2ei64_v_f16m1(base, bindex, v0, v1, vl); +void test_vsuxseg3ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { + return vsuxseg3ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16m1( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return vsuxseg3ei64_v_f16m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg4ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { + return vsuxseg4ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16m1( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return vsuxseg4ei64_v_f16m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg5ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { + return vsuxseg5ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16m1( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return vsuxseg5ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg6ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { + return vsuxseg6ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16m1( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return vsuxseg6ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg7ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { + return vsuxseg7ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16m1( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg7ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return vsuxseg7ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg8ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { + return vsuxseg8ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16m1( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return vsuxseg8ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg2ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { + return vsuxseg2ei64_v_f64m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16m2( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return vsuxseg2ei64_v_f16m2(base, bindex, v0, v1, vl); +void test_vsuxseg3ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { + return vsuxseg3ei64_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16m2( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return vsuxseg3ei64_v_f16m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg4ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { + return vsuxseg4ei64_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16m2( +// CHECK-RV64-LABEL: 
@test_vsuxseg2ei64_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return vsuxseg4ei64_v_f16m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg2ei64_v_f64m4_m (vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { + return vsuxseg2ei64_v_f64m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16mf4_m(